1 /*------------------------------------------------------------------------ 2 * Vulkan Conformance Tests 3 * ------------------------ 4 * 5 * Copyright (c) 2017 The Khronos Group Inc. 6 * 7 * Licensed under the Apache License, Version 2.0 (the "License"); 8 * you may not use this file except in compliance with the License. 9 * You may obtain a copy of the License at 10 * 11 * http://www.apache.org/licenses/LICENSE-2.0 12 * 13 * Unless required by applicable law or agreed to in writing, software 14 * distributed under the License is distributed on an "AS IS" BASIS, 15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 * See the License for the specific language governing permissions and 17 * limitations under the License. 18 * 19 *//*! 20 * \file vktPipelineRenderToImageTests.cpp 21 * \brief Render to image tests 22 *//*--------------------------------------------------------------------*/ 23 24 #include "vktPipelineRenderToImageTests.hpp" 25 #include "vktPipelineMakeUtil.hpp" 26 #include "vktTestCase.hpp" 27 #include "vktTestCaseUtil.hpp" 28 #include "vktPipelineVertexUtil.hpp" 29 #include "vktTestGroupUtil.hpp" 30 31 #include "vkMemUtil.hpp" 32 #include "vkQueryUtil.hpp" 33 #include "vkTypeUtil.hpp" 34 #include "vkRefUtil.hpp" 35 #include "vkBuilderUtil.hpp" 36 #include "vkPrograms.hpp" 37 #include "vkImageUtil.hpp" 38 39 #include "tcuTextureUtil.hpp" 40 #include "tcuImageCompare.hpp" 41 #include "tcuTestLog.hpp" 42 43 #include "deUniquePtr.hpp" 44 #include "deSharedPtr.hpp" 45 46 #include <string> 47 #include <vector> 48 #include <set> 49 50 namespace vkt 51 { 52 namespace pipeline 53 { 54 namespace 55 { 56 using namespace vk; 57 using de::UniquePtr; 58 using de::MovePtr; 59 using de::SharedPtr; 60 using tcu::IVec3; 61 using tcu::Vec4; 62 using tcu::UVec4; 63 using tcu::IVec2; 64 using tcu::IVec4; 65 using tcu::BVec4; 66 using std::vector; 67 68 typedef SharedPtr<Unique<VkImageView> > SharedPtrVkImageView; 69 typedef 
SharedPtr<Unique<VkPipeline> > SharedPtrVkPipeline; 70 71 enum Constants 72 { 73 NUM_CUBE_FACES = 6, 74 REFERENCE_COLOR_VALUE = 125, 75 REFERENCE_STENCIL_VALUE = 42, 76 MAX_SIZE = -1, //!< Should be queried at runtime and replaced with max possible value 77 MAX_VERIFICATION_REGION_SIZE = 32, //!< Limit the checked area to a small size, especially for huge images 78 MAX_VERIFICATION_REGION_DEPTH = 8, 79 80 MASK_W = (1 | 0 | 0 | 0), 81 MASK_W_LAYERS = (1 | 0 | 0 | 8), 82 MASK_WH = (1 | 2 | 0 | 0), 83 MASK_WH_LAYERS = (1 | 2 | 0 | 8), 84 MASK_WHD = (1 | 2 | 4 | 0), 85 }; 86 87 enum AllocationKind 88 { 89 ALLOCATION_KIND_SUBALLOCATED = 0, 90 ALLOCATION_KIND_DEDICATED, 91 }; 92 93 static const float REFERENCE_DEPTH_VALUE = 1.0f; 94 static const Vec4 COLOR_TABLE[] = 95 { 96 Vec4(0.9f, 0.0f, 0.0f, 1.0f), 97 Vec4(0.6f, 1.0f, 0.0f, 1.0f), 98 Vec4(0.3f, 0.0f, 1.0f, 1.0f), 99 Vec4(0.1f, 1.0f, 1.0f, 1.0f), 100 Vec4(0.8f, 1.0f, 0.0f, 1.0f), 101 Vec4(0.5f, 0.0f, 1.0f, 1.0f), 102 Vec4(0.2f, 0.0f, 0.0f, 1.0f), 103 Vec4(1.0f, 1.0f, 0.0f, 1.0f), 104 }; 105 106 struct CaseDef 107 { 108 VkImageViewType viewType; 109 IVec4 imageSizeHint; //!< (w, h, d, layers), a component may have a symbolic value MAX_SIZE 110 VkFormat colorFormat; 111 VkFormat depthStencilFormat; //! 
A depth/stencil format, or UNDEFINED if not used 112 AllocationKind allocationKind; 113 }; 114 115 template<typename T> 116 inline SharedPtr<Unique<T> > makeSharedPtr (Move<T> move) 117 { 118 return SharedPtr<Unique<T> >(new Unique<T>(move)); 119 } 120 121 template<typename T> 122 inline VkDeviceSize sizeInBytes (const vector<T>& vec) 123 { 124 return vec.size() * sizeof(vec[0]); 125 } 126 127 inline bool isCube (const VkImageViewType viewType) 128 { 129 return (viewType == VK_IMAGE_VIEW_TYPE_CUBE || viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY); 130 } 131 132 inline VkDeviceSize product (const IVec4& v) 133 { 134 return ((static_cast<VkDeviceSize>(v.x()) * v.y()) * v.z()) * v.w(); 135 } 136 137 template<typename T> 138 inline T sum (const vector<T>& v) 139 { 140 T total = static_cast<T>(0); 141 for (typename vector<T>::const_iterator it = v.begin(); it != v.end(); ++it) 142 total += *it; 143 return total; 144 } 145 146 template <typename T, int Size> 147 int findIndexOfMaxComponent (const tcu::Vector<T, Size>& vec) 148 { 149 int index = 0; 150 T value = vec[0]; 151 152 for (int i = 1; i < Size; ++i) 153 { 154 if (vec[i] > value) 155 { 156 index = i; 157 value = vec[i]; 158 } 159 } 160 161 return index; 162 } 163 164 inline int maxLayersOrDepth (const IVec4& size) 165 { 166 // This is safe because 3D images must have layers (w) = 1 167 return deMax32(size.z(), size.w()); 168 } 169 170 de::MovePtr<Allocation> bindBuffer (const InstanceInterface& vki, 171 const DeviceInterface& vkd, 172 const VkPhysicalDevice& physDevice, 173 const VkDevice device, 174 const VkBuffer& buffer, 175 const MemoryRequirement requirement, 176 Allocator& allocator, 177 AllocationKind allocationKind) 178 { 179 switch (allocationKind) 180 { 181 case ALLOCATION_KIND_SUBALLOCATED: 182 { 183 return ::vkt::pipeline::bindBuffer(vkd, device, allocator, buffer, requirement); 184 } 185 186 case ALLOCATION_KIND_DEDICATED: 187 { 188 return bindBufferDedicated(vki, vkd, physDevice, device, buffer, 
requirement); 189 } 190 191 default: 192 { 193 TCU_THROW(InternalError, "Invalid allocation kind"); 194 } 195 } 196 } 197 198 de::MovePtr<Allocation> bindImage (const InstanceInterface& vki, 199 const DeviceInterface& vkd, 200 const VkPhysicalDevice& physDevice, 201 const VkDevice device, 202 const VkImage& image, 203 const MemoryRequirement requirement, 204 Allocator& allocator, 205 AllocationKind allocationKind) 206 { 207 switch (allocationKind) 208 { 209 case ALLOCATION_KIND_SUBALLOCATED: 210 { 211 return ::vkt::pipeline::bindImage(vkd, device, allocator, image, requirement); 212 } 213 214 case ALLOCATION_KIND_DEDICATED: 215 { 216 return bindImageDedicated(vki, vkd, physDevice, device, image, requirement); 217 } 218 219 default: 220 { 221 TCU_THROW(InternalError, "Invalid allocation kind"); 222 } 223 } 224 } 225 226 // This is very test specific, so be careful if you want to reuse this code. 227 Move<VkPipeline> makeGraphicsPipeline (const DeviceInterface& vk, 228 const VkDevice device, 229 const VkPipeline basePipeline, // for derivatives 230 const VkPipelineLayout pipelineLayout, 231 const VkRenderPass renderPass, 232 const VkShaderModule vertexModule, 233 const VkShaderModule fragmentModule, 234 const IVec2& renderSize, 235 const VkPrimitiveTopology topology, 236 const deUint32 subpass, 237 const bool useDepth, 238 const bool useStencil) 239 { 240 const VkVertexInputBindingDescription vertexInputBindingDescription = 241 { 242 0u, // uint32_t binding; 243 sizeof(Vertex4RGBA), // uint32_t stride; 244 VK_VERTEX_INPUT_RATE_VERTEX, // VkVertexInputRate inputRate; 245 }; 246 247 const VkVertexInputAttributeDescription vertexInputAttributeDescriptions[] = 248 { 249 { 250 0u, // uint32_t location; 251 0u, // uint32_t binding; 252 VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format; 253 0u, // uint32_t offset; 254 }, 255 { 256 1u, // uint32_t location; 257 0u, // uint32_t binding; 258 VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format; 259 sizeof(Vec4), // uint32_t 
offset; 260 } 261 }; 262 263 const VkPipelineVertexInputStateCreateInfo vertexInputStateInfo = 264 { 265 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType; 266 DE_NULL, // const void* pNext; 267 (VkPipelineVertexInputStateCreateFlags)0, // VkPipelineVertexInputStateCreateFlags flags; 268 1u, // uint32_t vertexBindingDescriptionCount; 269 &vertexInputBindingDescription, // const VkVertexInputBindingDescription* pVertexBindingDescriptions; 270 DE_LENGTH_OF_ARRAY(vertexInputAttributeDescriptions), // uint32_t vertexAttributeDescriptionCount; 271 vertexInputAttributeDescriptions, // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions; 272 }; 273 274 const VkPipelineInputAssemblyStateCreateInfo pipelineInputAssemblyStateInfo = 275 { 276 VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // VkStructureType sType; 277 DE_NULL, // const void* pNext; 278 (VkPipelineInputAssemblyStateCreateFlags)0, // VkPipelineInputAssemblyStateCreateFlags flags; 279 topology, // VkPrimitiveTopology topology; 280 VK_FALSE, // VkBool32 primitiveRestartEnable; 281 }; 282 283 const VkViewport viewport = makeViewport( 284 0.0f, 0.0f, 285 static_cast<float>(renderSize.x()), static_cast<float>(renderSize.y()), 286 0.0f, 1.0f); 287 288 const VkRect2D scissor = 289 { 290 makeOffset2D(0, 0), 291 makeExtent2D(renderSize.x(), renderSize.y()), 292 }; 293 294 const VkPipelineViewportStateCreateInfo pipelineViewportStateInfo = 295 { 296 VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // VkStructureType sType; 297 DE_NULL, // const void* pNext; 298 (VkPipelineViewportStateCreateFlags)0, // VkPipelineViewportStateCreateFlags flags; 299 1u, // uint32_t viewportCount; 300 &viewport, // const VkViewport* pViewports; 301 1u, // uint32_t scissorCount; 302 &scissor, // const VkRect2D* pScissors; 303 }; 304 305 const VkPipelineRasterizationStateCreateInfo pipelineRasterizationStateInfo = 306 { 307 
VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType sType; 308 DE_NULL, // const void* pNext; 309 (VkPipelineRasterizationStateCreateFlags)0, // VkPipelineRasterizationStateCreateFlags flags; 310 VK_FALSE, // VkBool32 depthClampEnable; 311 VK_FALSE, // VkBool32 rasterizerDiscardEnable; 312 VK_POLYGON_MODE_FILL, // VkPolygonMode polygonMode; 313 VK_CULL_MODE_NONE, // VkCullModeFlags cullMode; 314 VK_FRONT_FACE_COUNTER_CLOCKWISE, // VkFrontFace frontFace; 315 VK_FALSE, // VkBool32 depthBiasEnable; 316 0.0f, // float depthBiasConstantFactor; 317 0.0f, // float depthBiasClamp; 318 0.0f, // float depthBiasSlopeFactor; 319 1.0f, // float lineWidth; 320 }; 321 322 const VkPipelineMultisampleStateCreateInfo pipelineMultisampleStateInfo = 323 { 324 VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // VkStructureType sType; 325 DE_NULL, // const void* pNext; 326 (VkPipelineMultisampleStateCreateFlags)0, // VkPipelineMultisampleStateCreateFlags flags; 327 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits rasterizationSamples; 328 VK_FALSE, // VkBool32 sampleShadingEnable; 329 0.0f, // float minSampleShading; 330 DE_NULL, // const VkSampleMask* pSampleMask; 331 VK_FALSE, // VkBool32 alphaToCoverageEnable; 332 VK_FALSE // VkBool32 alphaToOneEnable; 333 }; 334 335 const VkStencilOpState stencilOpState = makeStencilOpState( 336 VK_STENCIL_OP_KEEP, // stencil fail 337 VK_STENCIL_OP_KEEP, // depth & stencil pass 338 VK_STENCIL_OP_KEEP, // depth only fail 339 VK_COMPARE_OP_EQUAL, // compare op 340 ~0u, // compare mask 341 ~0u, // write mask 342 static_cast<deUint32>(REFERENCE_STENCIL_VALUE)); // reference 343 344 VkPipelineDepthStencilStateCreateInfo pipelineDepthStencilStateInfo = 345 { 346 VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, // VkStructureType sType; 347 DE_NULL, // const void* pNext; 348 (VkPipelineDepthStencilStateCreateFlags)0, // VkPipelineDepthStencilStateCreateFlags flags; 349 useDepth, // VkBool32 depthTestEnable; 350 
VK_FALSE, // VkBool32 depthWriteEnable; 351 VK_COMPARE_OP_LESS, // VkCompareOp depthCompareOp; 352 VK_FALSE, // VkBool32 depthBoundsTestEnable; 353 useStencil, // VkBool32 stencilTestEnable; 354 stencilOpState, // VkStencilOpState front; 355 stencilOpState, // VkStencilOpState back; 356 0.0f, // float minDepthBounds; 357 1.0f, // float maxDepthBounds; 358 }; 359 360 const VkColorComponentFlags colorComponentsAll = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT; 361 // Number of blend attachments must equal the number of color attachments during any subpass. 362 const VkPipelineColorBlendAttachmentState pipelineColorBlendAttachmentState = 363 { 364 VK_FALSE, // VkBool32 blendEnable; 365 VK_BLEND_FACTOR_ONE, // VkBlendFactor srcColorBlendFactor; 366 VK_BLEND_FACTOR_ZERO, // VkBlendFactor dstColorBlendFactor; 367 VK_BLEND_OP_ADD, // VkBlendOp colorBlendOp; 368 VK_BLEND_FACTOR_ONE, // VkBlendFactor srcAlphaBlendFactor; 369 VK_BLEND_FACTOR_ZERO, // VkBlendFactor dstAlphaBlendFactor; 370 VK_BLEND_OP_ADD, // VkBlendOp alphaBlendOp; 371 colorComponentsAll, // VkColorComponentFlags colorWriteMask; 372 }; 373 374 const VkPipelineColorBlendStateCreateInfo pipelineColorBlendStateInfo = 375 { 376 VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, // VkStructureType sType; 377 DE_NULL, // const void* pNext; 378 (VkPipelineColorBlendStateCreateFlags)0, // VkPipelineColorBlendStateCreateFlags flags; 379 VK_FALSE, // VkBool32 logicOpEnable; 380 VK_LOGIC_OP_COPY, // VkLogicOp logicOp; 381 1u, // deUint32 attachmentCount; 382 &pipelineColorBlendAttachmentState, // const VkPipelineColorBlendAttachmentState* pAttachments; 383 { 0.0f, 0.0f, 0.0f, 0.0f }, // float blendConstants[4]; 384 }; 385 386 const VkPipelineShaderStageCreateInfo pShaderStages[] = 387 { 388 { 389 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType; 390 DE_NULL, // const void* pNext; 391 (VkPipelineShaderStageCreateFlags)0, // 
VkPipelineShaderStageCreateFlags flags; 392 VK_SHADER_STAGE_VERTEX_BIT, // VkShaderStageFlagBits stage; 393 vertexModule, // VkShaderModule module; 394 "main", // const char* pName; 395 DE_NULL, // const VkSpecializationInfo* pSpecializationInfo; 396 }, 397 { 398 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType; 399 DE_NULL, // const void* pNext; 400 (VkPipelineShaderStageCreateFlags)0, // VkPipelineShaderStageCreateFlags flags; 401 VK_SHADER_STAGE_FRAGMENT_BIT, // VkShaderStageFlagBits stage; 402 fragmentModule, // VkShaderModule module; 403 "main", // const char* pName; 404 DE_NULL, // const VkSpecializationInfo* pSpecializationInfo; 405 } 406 }; 407 408 const VkPipelineCreateFlags flags = (basePipeline == DE_NULL ? VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT 409 : VK_PIPELINE_CREATE_DERIVATIVE_BIT); 410 411 const VkGraphicsPipelineCreateInfo graphicsPipelineInfo = 412 { 413 VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, // VkStructureType sType; 414 DE_NULL, // const void* pNext; 415 flags, // VkPipelineCreateFlags flags; 416 DE_LENGTH_OF_ARRAY(pShaderStages), // deUint32 stageCount; 417 pShaderStages, // const VkPipelineShaderStageCreateInfo* pStages; 418 &vertexInputStateInfo, // const VkPipelineVertexInputStateCreateInfo* pVertexInputState; 419 &pipelineInputAssemblyStateInfo, // const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState; 420 DE_NULL, // const VkPipelineTessellationStateCreateInfo* pTessellationState; 421 &pipelineViewportStateInfo, // const VkPipelineViewportStateCreateInfo* pViewportState; 422 &pipelineRasterizationStateInfo, // const VkPipelineRasterizationStateCreateInfo* pRasterizationState; 423 &pipelineMultisampleStateInfo, // const VkPipelineMultisampleStateCreateInfo* pMultisampleState; 424 &pipelineDepthStencilStateInfo, // const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState; 425 &pipelineColorBlendStateInfo, // const VkPipelineColorBlendStateCreateInfo* pColorBlendState; 426 DE_NULL, 
// const VkPipelineDynamicStateCreateInfo* pDynamicState; 427 pipelineLayout, // VkPipelineLayout layout; 428 renderPass, // VkRenderPass renderPass; 429 subpass, // deUint32 subpass; 430 basePipeline, // VkPipeline basePipelineHandle; 431 -1, // deInt32 basePipelineIndex; 432 }; 433 434 return createGraphicsPipeline(vk, device, DE_NULL, &graphicsPipelineInfo); 435 } 436 437 //! Make a render pass with one subpass per color attachment and depth/stencil attachment (if used). 438 Move<VkRenderPass> makeRenderPass (const DeviceInterface& vk, 439 const VkDevice device, 440 const VkFormat colorFormat, 441 const VkFormat depthStencilFormat, 442 const deUint32 numLayers, 443 const VkImageLayout initialColorImageLayout = VK_IMAGE_LAYOUT_UNDEFINED, 444 const VkImageLayout initialDepthStencilImageLayout = VK_IMAGE_LAYOUT_UNDEFINED) 445 { 446 const VkAttachmentDescription colorAttachmentDescription = 447 { 448 (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags; 449 colorFormat, // VkFormat format; 450 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples; 451 VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp; 452 VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp; 453 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp; 454 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp; 455 initialColorImageLayout, // VkImageLayout initialLayout; 456 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout finalLayout; 457 }; 458 vector<VkAttachmentDescription> attachmentDescriptions(numLayers, colorAttachmentDescription); 459 460 const VkAttachmentDescription depthStencilAttachmentDescription = 461 { 462 (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags; 463 depthStencilFormat, // VkFormat format; 464 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples; 465 VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp; 466 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp storeOp; 
467 VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp stencilLoadOp; 468 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp; 469 initialDepthStencilImageLayout, // VkImageLayout initialLayout; 470 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, // VkImageLayout finalLayout; 471 }; 472 473 if (depthStencilFormat != VK_FORMAT_UNDEFINED) 474 attachmentDescriptions.insert(attachmentDescriptions.end(), numLayers, depthStencilAttachmentDescription); 475 476 // Create a subpass for each attachment (each attachement is a layer of an arrayed image). 477 vector<VkAttachmentReference> colorAttachmentReferences (numLayers); 478 vector<VkAttachmentReference> depthStencilAttachmentReferences(numLayers); 479 vector<VkSubpassDescription> subpasses; 480 481 // Ordering here must match the framebuffer attachments 482 for (deUint32 i = 0; i < numLayers; ++i) 483 { 484 const VkAttachmentReference attachmentRef = 485 { 486 i, // deUint32 attachment; 487 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout; 488 }; 489 const VkAttachmentReference depthStencilAttachmentRef = 490 { 491 i + numLayers, // deUint32 attachment; 492 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL // VkImageLayout layout; 493 }; 494 495 colorAttachmentReferences[i] = attachmentRef; 496 depthStencilAttachmentReferences[i] = depthStencilAttachmentRef; 497 498 const VkAttachmentReference* pDepthStencilAttachment = (depthStencilFormat != VK_FORMAT_UNDEFINED ? 
&depthStencilAttachmentReferences[i] : DE_NULL); 499 const VkSubpassDescription subpassDescription = 500 { 501 (VkSubpassDescriptionFlags)0, // VkSubpassDescriptionFlags flags; 502 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint; 503 0u, // deUint32 inputAttachmentCount; 504 DE_NULL, // const VkAttachmentReference* pInputAttachments; 505 1u, // deUint32 colorAttachmentCount; 506 &colorAttachmentReferences[i], // const VkAttachmentReference* pColorAttachments; 507 DE_NULL, // const VkAttachmentReference* pResolveAttachments; 508 pDepthStencilAttachment, // const VkAttachmentReference* pDepthStencilAttachment; 509 0u, // deUint32 preserveAttachmentCount; 510 DE_NULL // const deUint32* pPreserveAttachments; 511 }; 512 subpasses.push_back(subpassDescription); 513 } 514 515 const VkRenderPassCreateInfo renderPassInfo = 516 { 517 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType; 518 DE_NULL, // const void* pNext; 519 (VkRenderPassCreateFlags)0, // VkRenderPassCreateFlags flags; 520 static_cast<deUint32>(attachmentDescriptions.size()), // deUint32 attachmentCount; 521 &attachmentDescriptions[0], // const VkAttachmentDescription* pAttachments; 522 static_cast<deUint32>(subpasses.size()), // deUint32 subpassCount; 523 &subpasses[0], // const VkSubpassDescription* pSubpasses; 524 0u, // deUint32 dependencyCount; 525 DE_NULL // const VkSubpassDependency* pDependencies; 526 }; 527 528 return createRenderPass(vk, device, &renderPassInfo); 529 } 530 531 Move<VkImage> makeImage (const DeviceInterface& vk, 532 const VkDevice device, 533 VkImageCreateFlags flags, 534 VkImageType imageType, 535 const VkFormat format, 536 const IVec3& size, 537 const deUint32 numMipLevels, 538 const deUint32 numLayers, 539 const VkImageUsageFlags usage) 540 { 541 const VkImageCreateInfo imageParams = 542 { 543 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType; 544 DE_NULL, // const void* pNext; 545 flags, // VkImageCreateFlags flags; 546 
imageType, // VkImageType imageType; 547 format, // VkFormat format; 548 makeExtent3D(size), // VkExtent3D extent; 549 numMipLevels, // deUint32 mipLevels; 550 numLayers, // deUint32 arrayLayers; 551 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples; 552 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling; 553 usage, // VkImageUsageFlags usage; 554 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode; 555 0u, // deUint32 queueFamilyIndexCount; 556 DE_NULL, // const deUint32* pQueueFamilyIndices; 557 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout; 558 }; 559 return createImage(vk, device, &imageParams); 560 } 561 562 inline Move<VkBuffer> makeBuffer (const DeviceInterface& vk, const VkDevice device, const VkDeviceSize bufferSize, const VkBufferUsageFlags usage) 563 { 564 const VkBufferCreateInfo bufferCreateInfo = makeBufferCreateInfo(bufferSize, usage); 565 return createBuffer(vk, device, &bufferCreateInfo); 566 } 567 568 inline VkImageSubresourceRange makeColorSubresourceRange (const int baseArrayLayer, const int layerCount) 569 { 570 return makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, static_cast<deUint32>(baseArrayLayer), static_cast<deUint32>(layerCount)); 571 } 572 573 //! Get a reference clear value based on color format. 574 VkClearValue getClearValue (const VkFormat format) 575 { 576 if (isUintFormat(format) || isIntFormat(format)) 577 return makeClearValueColorU32(REFERENCE_COLOR_VALUE, REFERENCE_COLOR_VALUE, REFERENCE_COLOR_VALUE, REFERENCE_COLOR_VALUE); 578 else 579 return makeClearValueColorF32(1.0f, 1.0f, 1.0f, 1.0f); 580 } 581 582 std::string getColorFormatStr (const int numComponents, const bool isUint, const bool isSint) 583 { 584 std::ostringstream str; 585 if (numComponents == 1) 586 str << (isUint ? "uint" : isSint ? "int" : "float"); 587 else 588 str << (isUint ? "u" : isSint ? "i" : "") << "vec" << numComponents; 589 590 return str.str(); 591 } 592 593 //! A half-viewport quad. Use with TRIANGLE_STRIP topology. 
594 vector<Vertex4RGBA> genFullQuadVertices (const int subpassCount) 595 { 596 vector<Vertex4RGBA> vectorData; 597 for (int subpassNdx = 0; subpassNdx < subpassCount; ++subpassNdx) 598 { 599 Vertex4RGBA data = 600 { 601 Vec4(0.0f, -1.0f, 0.0f, 1.0f), 602 COLOR_TABLE[subpassNdx % DE_LENGTH_OF_ARRAY(COLOR_TABLE)], 603 }; 604 vectorData.push_back(data); 605 data.position = Vec4(0.0f, 1.0f, 0.0f, 1.0f); 606 vectorData.push_back(data); 607 data.position = Vec4(1.0f, -1.0f, 0.0f, 1.0f); 608 vectorData.push_back(data); 609 data.position = Vec4(1.0f, 1.0f, 0.0f, 1.0f); 610 vectorData.push_back(data); 611 } 612 return vectorData; 613 } 614 615 VkImageType getImageType (const VkImageViewType viewType) 616 { 617 switch (viewType) 618 { 619 case VK_IMAGE_VIEW_TYPE_1D: 620 case VK_IMAGE_VIEW_TYPE_1D_ARRAY: 621 return VK_IMAGE_TYPE_1D; 622 623 case VK_IMAGE_VIEW_TYPE_2D: 624 case VK_IMAGE_VIEW_TYPE_2D_ARRAY: 625 case VK_IMAGE_VIEW_TYPE_CUBE: 626 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY: 627 return VK_IMAGE_TYPE_2D; 628 629 case VK_IMAGE_VIEW_TYPE_3D: 630 return VK_IMAGE_TYPE_3D; 631 632 default: 633 DE_ASSERT(0); 634 return VK_IMAGE_TYPE_LAST; 635 } 636 } 637 638 //! 
ImageViewType for accessing a single layer/slice of an image 639 VkImageViewType getImageViewSliceType (const VkImageViewType viewType) 640 { 641 switch (viewType) 642 { 643 case VK_IMAGE_VIEW_TYPE_1D: 644 case VK_IMAGE_VIEW_TYPE_1D_ARRAY: 645 return VK_IMAGE_VIEW_TYPE_1D; 646 647 case VK_IMAGE_VIEW_TYPE_2D: 648 case VK_IMAGE_VIEW_TYPE_2D_ARRAY: 649 case VK_IMAGE_VIEW_TYPE_CUBE: 650 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY: 651 case VK_IMAGE_VIEW_TYPE_3D: 652 return VK_IMAGE_VIEW_TYPE_2D; 653 654 default: 655 DE_ASSERT(0); 656 return VK_IMAGE_VIEW_TYPE_LAST; 657 } 658 } 659 660 VkImageCreateFlags getImageCreateFlags (const VkImageViewType viewType) 661 { 662 VkImageCreateFlags flags = (VkImageCreateFlags)0; 663 664 if (viewType == VK_IMAGE_VIEW_TYPE_3D) flags |= VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR; 665 if (isCube(viewType)) flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; 666 667 return flags; 668 } 669 670 void generateExpectedImage (const tcu::PixelBufferAccess& outputImage, const IVec2& renderSize, const int colorDepthOffset) 671 { 672 const tcu::TextureChannelClass channelClass = tcu::getTextureChannelClass(outputImage.getFormat().type); 673 const bool isInt = (channelClass == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER || channelClass == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER); 674 const VkClearValue clearValue = getClearValue(mapTextureFormat(outputImage.getFormat())); 675 676 if (isInt) 677 tcu::clear(outputImage, IVec4(clearValue.color.int32)); 678 else 679 tcu::clear(outputImage, Vec4(clearValue.color.float32)); 680 681 for (int z = 0; z < outputImage.getDepth(); ++z) 682 { 683 const Vec4& setColor = COLOR_TABLE[(z + colorDepthOffset) % DE_LENGTH_OF_ARRAY(COLOR_TABLE)]; 684 const IVec4 setColorInt = (static_cast<float>(REFERENCE_COLOR_VALUE) * setColor).cast<deInt32>(); 685 686 for (int y = 0; y < renderSize.y(); ++y) 687 for (int x = renderSize.x()/2; x < renderSize.x(); ++x) 688 { 689 if (isInt) 690 outputImage.setPixel(setColorInt, x, y, z); 691 else 692 
outputImage.setPixel(setColor, x, y, z); 693 } 694 } 695 } 696 697 deUint32 selectMatchingMemoryType (const VkPhysicalDeviceMemoryProperties& deviceMemProps, deUint32 allowedMemTypeBits, MemoryRequirement requirement) 698 { 699 const deUint32 compatibleTypes = getCompatibleMemoryTypes(deviceMemProps, requirement); 700 const deUint32 candidates = allowedMemTypeBits & compatibleTypes; 701 702 if (candidates == 0) 703 TCU_THROW(NotSupportedError, "No compatible memory type found"); 704 705 return (deUint32)deCtz32(candidates); 706 } 707 708 IVec4 getMaxImageSize (const VkImageViewType viewType, const IVec4& sizeHint) 709 { 710 //Limits have been taken from the vulkan specification 711 IVec4 size = IVec4( 712 sizeHint.x() != MAX_SIZE ? sizeHint.x() : 4096, 713 sizeHint.y() != MAX_SIZE ? sizeHint.y() : 4096, 714 sizeHint.z() != MAX_SIZE ? sizeHint.z() : 256, 715 sizeHint.w() != MAX_SIZE ? sizeHint.w() : 256); 716 717 switch (viewType) 718 { 719 case VK_IMAGE_VIEW_TYPE_1D: 720 case VK_IMAGE_VIEW_TYPE_1D_ARRAY: 721 size.x() = deMin32(4096, size.x()); 722 break; 723 724 case VK_IMAGE_VIEW_TYPE_2D: 725 case VK_IMAGE_VIEW_TYPE_2D_ARRAY: 726 size.x() = deMin32(4096, size.x()); 727 size.y() = deMin32(4096, size.y()); 728 break; 729 730 case VK_IMAGE_VIEW_TYPE_3D: 731 size.x() = deMin32(256, size.x()); 732 size.y() = deMin32(256, size.y()); 733 break; 734 735 case VK_IMAGE_VIEW_TYPE_CUBE: 736 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY: 737 size.x() = deMin32(4096, size.x()); 738 size.y() = deMin32(4096, size.y()); 739 size.w() = deMin32(252, size.w()); 740 size.w() = NUM_CUBE_FACES * (size.w() / NUM_CUBE_FACES); // round down to 6 faces 741 break; 742 743 default: 744 DE_ASSERT(0); 745 return IVec4(); 746 } 747 748 return size; 749 } 750 751 deUint32 getMemoryTypeNdx (Context& context, const CaseDef& caseDef) 752 { 753 const DeviceInterface& vk = context.getDeviceInterface(); 754 const InstanceInterface& vki = context.getInstanceInterface(); 755 const VkDevice device = 
context.getDevice(); 756 const VkPhysicalDevice physDevice = context.getPhysicalDevice(); 757 758 const VkPhysicalDeviceMemoryProperties memoryProperties = getPhysicalDeviceMemoryProperties(vki, physDevice); 759 Move<VkImage> colorImage; 760 VkMemoryRequirements memReqs; 761 762 const VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT; 763 const IVec4 imageSize = getMaxImageSize(caseDef.viewType, caseDef.imageSizeHint); 764 765 //create image, don't bind any memory to it 766 colorImage = makeImage(vk, device, getImageCreateFlags(caseDef.viewType), getImageType(caseDef.viewType), caseDef.colorFormat, 767 imageSize.swizzle(0, 1, 2), 1u, imageSize.w(), imageUsage); 768 769 vk.getImageMemoryRequirements(device, *colorImage, &memReqs); 770 return selectMatchingMemoryType(memoryProperties, memReqs.memoryTypeBits, MemoryRequirement::Any); 771 } 772 773 VkDeviceSize getMaxDeviceHeapSize (Context& context, const CaseDef& caseDef) 774 { 775 const InstanceInterface& vki = context.getInstanceInterface(); 776 const VkPhysicalDevice physDevice = context.getPhysicalDevice(); 777 const VkPhysicalDeviceMemoryProperties memoryProperties = getPhysicalDeviceMemoryProperties(vki, physDevice); 778 const deUint32 memoryTypeNdx = getMemoryTypeNdx (context, caseDef); 779 780 return memoryProperties.memoryHeaps[memoryProperties.memoryTypes[memoryTypeNdx].heapIndex].size; 781 } 782 783 //! Get a smaller image size. Returns a vector of zeroes, if it can't reduce more. 
784 IVec4 getReducedImageSize (const CaseDef& caseDef, IVec4 size) 785 { 786 const int maxIndex = findIndexOfMaxComponent(size); 787 const int reducedSize = size[maxIndex] >> 1; 788 789 switch (caseDef.viewType) 790 { 791 case VK_IMAGE_VIEW_TYPE_CUBE: 792 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY: 793 if (maxIndex < 2) 794 size.x() = size.y() = reducedSize; 795 else if (maxIndex == 3 && reducedSize >= NUM_CUBE_FACES) 796 size.w() = NUM_CUBE_FACES * (reducedSize / NUM_CUBE_FACES); // round down to a multiple of 6 797 else 798 size = IVec4(0); 799 break; 800 801 default: 802 size[maxIndex] = reducedSize; 803 break; 804 } 805 806 if (reducedSize == 0) 807 size = IVec4(0); 808 809 return size; 810 } 811 812 bool isDepthStencilFormatSupported (const InstanceInterface& vki, const VkPhysicalDevice physDevice, const VkFormat format) 813 { 814 const VkFormatProperties properties = getPhysicalDeviceFormatProperties(vki, physDevice, format); 815 return (properties.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) != 0; 816 } 817 818 VkImageAspectFlags getFormatAspectFlags (const VkFormat format) 819 { 820 if (format == VK_FORMAT_UNDEFINED) 821 return 0; 822 823 const tcu::TextureFormat::ChannelOrder order = mapVkFormat(format).order; 824 825 switch (order) 826 { 827 case tcu::TextureFormat::DS: return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; 828 case tcu::TextureFormat::D: return VK_IMAGE_ASPECT_DEPTH_BIT; 829 case tcu::TextureFormat::S: return VK_IMAGE_ASPECT_STENCIL_BIT; 830 default: return VK_IMAGE_ASPECT_COLOR_BIT; 831 } 832 } 833 834 void initPrograms (SourceCollections& programCollection, const CaseDef caseDef) 835 { 836 const int numComponents = getNumUsedChannels(mapVkFormat(caseDef.colorFormat).order); 837 const bool isUint = isUintFormat(caseDef.colorFormat); 838 const bool isSint = isIntFormat(caseDef.colorFormat); 839 840 // Vertex shader 841 { 842 std::ostringstream src; 843 src << 
glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n" 844 << "\n" 845 << "layout(location = 0) in vec4 in_position;\n" 846 << "layout(location = 1) in vec4 in_color;\n" 847 << "layout(location = 0) out vec4 out_color;\n" 848 << "\n" 849 << "out gl_PerVertex {\n" 850 << " vec4 gl_Position;\n" 851 << "};\n" 852 << "\n" 853 << "void main(void)\n" 854 << "{\n" 855 << " gl_Position = in_position;\n" 856 << " out_color = in_color;\n" 857 << "}\n"; 858 859 programCollection.glslSources.add("vert") << glu::VertexSource(src.str()); 860 } 861 862 // Fragment shader 863 { 864 std::ostringstream colorValue; 865 colorValue << REFERENCE_COLOR_VALUE; 866 const std::string colorFormat = getColorFormatStr(numComponents, isUint, isSint); 867 const std::string colorInteger = (isUint || isSint ? " * "+colorFormat+"("+colorValue.str()+")" :""); 868 869 std::ostringstream src; 870 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n" 871 << "\n" 872 << "layout(location = 0) in vec4 in_color;\n" 873 << "layout(location = 0) out " << colorFormat << " o_color;\n" 874 << "\n" 875 << "void main(void)\n" 876 << "{\n" 877 << " o_color = " << colorFormat << "(" 878 << (numComponents == 1 ? "in_color.r" : 879 numComponents == 2 ? "in_color.rg" : 880 numComponents == 3 ? "in_color.rgb" : "in_color") 881 << colorInteger 882 << ");\n" 883 << "}\n"; 884 885 programCollection.glslSources.add("frag") << glu::FragmentSource(src.str()); 886 } 887 } 888 889 //! 
//! See testAttachmentSize() description
tcu::TestStatus testWithSizeReduction (Context& context, const CaseDef& caseDef)
{
	const DeviceInterface&		vk					= context.getDeviceInterface();
	const InstanceInterface&	vki					= context.getInstanceInterface();
	const VkDevice				device				= context.getDevice();
	const VkPhysicalDevice		physDevice			= context.getPhysicalDevice();
	const VkQueue				queue				= context.getUniversalQueue();
	const deUint32				queueFamilyIndex	= context.getUniversalQueueFamilyIndex();
	Allocator&					allocator			= context.getDefaultAllocator();

	// The memory might be too small to allocate a largest possible attachment, so try to account for that.
	const bool		useDepthStencil		= (caseDef.depthStencilFormat != VK_FORMAT_UNDEFINED);

	IVec4			imageSize			= getMaxImageSize(caseDef.viewType, caseDef.imageSizeHint);
	VkDeviceSize	colorSize			= product(imageSize) * tcu::getPixelSize(mapVkFormat(caseDef.colorFormat));
	VkDeviceSize	depthStencilSize	= (useDepthStencil ? product(imageSize) * tcu::getPixelSize(mapVkFormat(caseDef.depthStencilFormat)) : 0ull);

	const VkDeviceSize	reserveForChecking	= 500ull * 1024ull;	//left 512KB
	const float			additionalMemory	= 1.15f;			//left some free memory on device (15%)
	VkDeviceSize		neededMemory		= static_cast<VkDeviceSize>(static_cast<float>(colorSize + depthStencilSize) * additionalMemory) + reserveForChecking;
	VkDeviceSize		maxMemory			= getMaxDeviceHeapSize(context, caseDef) >> 2;	// use at most a quarter of the heap

	const VkDeviceSize	deviceMemoryBudget	= std::min(neededMemory, maxMemory);
	bool				allocationPossible	= false;

	// Keep reducing the size, if image size is too big
	while (neededMemory > deviceMemoryBudget)
	{
		imageSize = getReducedImageSize(caseDef, imageSize);

		if (imageSize == IVec4())
			return tcu::TestStatus::fail("Couldn't create an image with required size");

		colorSize			= product(imageSize) * tcu::getPixelSize(mapVkFormat(caseDef.colorFormat));
		depthStencilSize	= (useDepthStencil ? product(imageSize) * tcu::getPixelSize(mapVkFormat(caseDef.depthStencilFormat)) : 0ull);
		// NOTE(review): unlike the initial estimate above and the retry loop below, this recomputation
		// does not add reserveForChecking (and casts via double rather than float) -- confirm intentional.
		neededMemory		= static_cast<VkDeviceSize>(static_cast<double>(colorSize + depthStencilSize) * additionalMemory);
	}

	// Keep reducing the size, if allocation return out of any memory
	while (!allocationPossible)
	{
		// Probe with a real allocation; success proves the estimated budget is actually obtainable
		VkDeviceMemory				object			= 0;
		const VkMemoryAllocateInfo	allocateInfo	=
		{
			VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,		//VkStructureType    sType;
			DE_NULL,									//const void*        pNext;
			neededMemory,								//VkDeviceSize       allocationSize;
			getMemoryTypeNdx(context, caseDef)			//deUint32           memoryTypeIndex;
		};

		const VkResult result = vk.allocateMemory(device, &allocateInfo, DE_NULL, &object);

		if (VK_ERROR_OUT_OF_DEVICE_MEMORY == result || VK_ERROR_OUT_OF_HOST_MEMORY == result)
		{
			// Not enough memory after all: shrink the image and retry
			imageSize = getReducedImageSize(caseDef, imageSize);

			if (imageSize == IVec4())
				return tcu::TestStatus::fail("Couldn't create an image with required size");

			colorSize			= product(imageSize) * tcu::getPixelSize(mapVkFormat(caseDef.colorFormat));
			depthStencilSize	= (useDepthStencil ? product(imageSize) * tcu::getPixelSize(mapVkFormat(caseDef.depthStencilFormat)) : 0ull);
			neededMemory		= static_cast<VkDeviceSize>(static_cast<double>(colorSize + depthStencilSize) * additionalMemory) + reserveForChecking;
		}
		else if (VK_SUCCESS != result)
		{
			return tcu::TestStatus::fail("Couldn't allocate memory");
		}
		else
		{
			//free memory using Move pointer
			Move<VkDeviceMemory> memoryAllocated (check<VkDeviceMemory>(object), Deleter<VkDeviceMemory>(vk, device, DE_NULL));
			allocationPossible = true;
		}
	}

	context.getTestContext().getLog()
		<< tcu::TestLog::Message << "Using an image with size (width, height, depth, layers) = " << imageSize << tcu::TestLog::EndMessage;

	// "Slices" is either the depth of a 3D image, or the number of layers of an arrayed image
	const deInt32 numSlices = maxLayersOrDepth(imageSize);

	if (useDepthStencil && !isDepthStencilFormatSupported(vki, physDevice, caseDef.depthStencilFormat))
		TCU_THROW(NotSupportedError, "Unsupported depth/stencil format");

	// Determine the verification bounds. The checked region will be in the center of the rendered image
	const IVec4	checkSize	= tcu::min(imageSize, IVec4(MAX_VERIFICATION_REGION_SIZE,
														MAX_VERIFICATION_REGION_SIZE,
														MAX_VERIFICATION_REGION_DEPTH,
														MAX_VERIFICATION_REGION_DEPTH));
	const IVec4	checkOffset	= (imageSize - checkSize) / 2;

	// Only make enough space for the check region
	const VkDeviceSize			colorBufferSize		= product(checkSize) * tcu::getPixelSize(mapVkFormat(caseDef.colorFormat));
	const Unique<VkBuffer>		colorBuffer			(makeBuffer(vk, device, colorBufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT));
	const UniquePtr<Allocation>	colorBufferAlloc	(bindBuffer(vki, vk, physDevice, device, *colorBuffer, MemoryRequirement::HostVisible, allocator, caseDef.allocationKind));

	{
		// Zero-fill the readback buffer so stale host memory can't fake a passing comparison
		deMemset(colorBufferAlloc->getHostPtr(), 0, static_cast<std::size_t>(colorBufferSize));
		flushMappedMemoryRange(vk, device, colorBufferAlloc->getMemory(), colorBufferAlloc->getOffset(), VK_WHOLE_SIZE);
	}

	const Unique<VkShaderModule>	vertexModule	(createShaderModule	(vk, device, context.getBinaryCollection().get("vert"), 0u));
	const Unique<VkShaderModule>	fragmentModule	(createShaderModule	(vk, device, context.getBinaryCollection().get("frag"), 0u));
	// 3D images are transitioned once up front (see below), so the render pass must expect them
	// already in COLOR_ATTACHMENT_OPTIMAL; arrayed images start UNDEFINED.
	const Unique<VkRenderPass>		renderPass		(makeRenderPass		(vk, device, caseDef.colorFormat, caseDef.depthStencilFormat, static_cast<deUint32>(numSlices),
																		(caseDef.viewType == VK_IMAGE_VIEW_TYPE_3D) ? VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
																													: VK_IMAGE_LAYOUT_UNDEFINED));
	const Unique<VkPipelineLayout>	pipelineLayout	(makePipelineLayout	(vk, device));
	vector<SharedPtrVkPipeline>		pipelines;

	Move<VkImage>					colorImage;
	MovePtr<Allocation>				colorImageAlloc;
	vector<SharedPtrVkImageView>	colorAttachments;
	Move<VkImage>					depthStencilImage;
	MovePtr<Allocation>				depthStencilImageAlloc;
	vector<SharedPtrVkImageView>	depthStencilAttachments;
	vector<VkImageView>				attachmentHandles;			// all attachments (color and d/s)
	Move<VkBuffer>					vertexBuffer;
	MovePtr<Allocation>				vertexBufferAlloc;
	Move<VkFramebuffer>				framebuffer;

	// Create a color image
	{
		const VkImageUsageFlags	imageUsage	= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;

		colorImage		= makeImage(vk, device, getImageCreateFlags(caseDef.viewType), getImageType(caseDef.viewType), caseDef.colorFormat,
									imageSize.swizzle(0, 1, 2), 1u, imageSize.w(), imageUsage);
		colorImageAlloc	= bindImage(vki, vk, physDevice, device, *colorImage, MemoryRequirement::Any, allocator, caseDef.allocationKind);
	}

	// Create a depth/stencil image (always a 2D image, optionally layered)
	if (useDepthStencil)
	{
		const VkImageUsageFlags	imageUsage	= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;

		depthStencilImage		= makeImage(vk, device, (VkImageCreateFlags)0, VK_IMAGE_TYPE_2D, caseDef.depthStencilFormat,
											IVec3(imageSize.x(), imageSize.y(), 1), 1u, numSlices, imageUsage);
		depthStencilImageAlloc	= bindImage(vki, vk, physDevice, device, *depthStencilImage, MemoryRequirement::Any, allocator, caseDef.allocationKind);
	}

	// Create a vertex buffer
	{
		const vector<Vertex4RGBA>	vertices			= genFullQuadVertices(numSlices);
		const VkDeviceSize			vertexBufferSize	= sizeInBytes(vertices);

		vertexBuffer		= makeBuffer(vk, device, vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
		vertexBufferAlloc	= bindBuffer(vki, vk, physDevice, device, *vertexBuffer, MemoryRequirement::HostVisible, allocator, caseDef.allocationKind);

		deMemcpy(vertexBufferAlloc->getHostPtr(), &vertices[0], static_cast<std::size_t>(vertexBufferSize));
		flushMappedMemoryRange(vk, device, vertexBufferAlloc->getMemory(), vertexBufferAlloc->getOffset(), vertexBufferSize);
	}

	// Prepare color image upfront for rendering to individual slices.  3D slices aren't separate subresources, so they shouldn't be transitioned
	// during each subpass like array layers.
	if (caseDef.viewType == VK_IMAGE_VIEW_TYPE_3D)
	{
		const Unique<VkCommandPool>		cmdPool		(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
		const Unique<VkCommandBuffer>	cmdBuffer	(makeCommandBuffer(vk, device, *cmdPool));

		beginCommandBuffer(vk, *cmdBuffer);

		const VkImageMemoryBarrier imageBarrier =
		{
			VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,				// VkStructureType            sType;
			DE_NULL,											// const void*                pNext;
			(VkAccessFlags)0,									// VkAccessFlags              srcAccessMask;
			VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,				// VkAccessFlags              dstAccessMask;
			VK_IMAGE_LAYOUT_UNDEFINED,							// VkImageLayout              oldLayout;
			VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,			// VkImageLayout              newLayout;
			VK_QUEUE_FAMILY_IGNORED,							// uint32_t                   srcQueueFamilyIndex;
			VK_QUEUE_FAMILY_IGNORED,							// uint32_t                   dstQueueFamilyIndex;
			*colorImage,										// VkImage                    image;
			{													// VkImageSubresourceRange    subresourceRange;
				VK_IMAGE_ASPECT_COLOR_BIT,							// VkImageAspectFlags    aspectMask;
				0u,													// uint32_t              baseMipLevel;
				1u,													// uint32_t              levelCount;
				0u,													// uint32_t              baseArrayLayer;
				static_cast<deUint32>(imageSize.w()),				// uint32_t              layerCount;
			}
		};

		vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0u,
							  0u, DE_NULL, 0u, DE_NULL, 1u, &imageBarrier);

		VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
		submitCommandsAndWait(vk, device, queue, *cmdBuffer);
	}

	// For each image layer or slice (3D), create an attachment and a pipeline
	{
		const VkImageAspectFlags	depthStencilAspect	= getFormatAspectFlags(caseDef.depthStencilFormat);
		const bool					useDepth			= (depthStencilAspect & VK_IMAGE_ASPECT_DEPTH_BIT)   != 0;
		const bool					useStencil			= (depthStencilAspect & VK_IMAGE_ASPECT_STENCIL_BIT) != 0;
		VkPipeline					basePipeline		= DE_NULL;

		// Color attachments are first in the framebuffer
		for (int subpassNdx = 0; subpassNdx < numSlices; ++subpassNdx)
		{
			colorAttachments.push_back(makeSharedPtr(
				makeImageView(vk, device, *colorImage, getImageViewSliceType(caseDef.viewType), caseDef.colorFormat, makeColorSubresourceRange(subpassNdx, 1))));
			attachmentHandles.push_back(**colorAttachments.back());

			// We also have to create pipelines for each subpass
			pipelines.push_back(makeSharedPtr(makeGraphicsPipeline(
				vk, device, basePipeline, *pipelineLayout, *renderPass, *vertexModule, *fragmentModule, imageSize.swizzle(0, 1), VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
				static_cast<deUint32>(subpassNdx), useDepth, useStencil)));

			// All subsequent pipelines are derived from the first one
			basePipeline = **pipelines.front();
		}

		// Then D/S attachments, if any
		if (useDepthStencil)
			for (int subpassNdx = 0; subpassNdx < numSlices; ++subpassNdx)
			{
				depthStencilAttachments.push_back(makeSharedPtr(
					makeImageView(vk, device, *depthStencilImage, VK_IMAGE_VIEW_TYPE_2D, caseDef.depthStencilFormat, makeImageSubresourceRange(depthStencilAspect, 0u, 1u, subpassNdx, 1u))));
				attachmentHandles.push_back(**depthStencilAttachments.back());
			}
	}

	framebuffer = makeFramebuffer(vk, device, *renderPass, static_cast<deUint32>(attachmentHandles.size()), &attachmentHandles[0], static_cast<deUint32>(imageSize.x()), static_cast<deUint32>(imageSize.y()));

	{
		const Unique<VkCommandPool>		cmdPool		(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
		const Unique<VkCommandBuffer>	cmdBuffer	(makeCommandBuffer(vk, device, *cmdPool));

		beginCommandBuffer(vk, *cmdBuffer);
		{
			// One clear value per color attachment, followed by one per D/S attachment (if present)
			vector<VkClearValue> clearValues (numSlices, getClearValue(caseDef.colorFormat));

			if (useDepthStencil)
				clearValues.insert(clearValues.end(), numSlices, makeClearValueDepthStencil(REFERENCE_DEPTH_VALUE, REFERENCE_STENCIL_VALUE));

			const VkRect2D renderArea =
			{
				makeOffset2D(0, 0),
				makeExtent2D(imageSize.x(), imageSize.y()),
			};
			const VkRenderPassBeginInfo renderPassBeginInfo =
			{
				VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,		// VkStructureType         sType;
				DE_NULL,										// const void*             pNext;
				*renderPass,									// VkRenderPass            renderPass;
				*framebuffer,									// VkFramebuffer           framebuffer;
				renderArea,										// VkRect2D                renderArea;
				static_cast<deUint32>(clearValues.size()),		// uint32_t                clearValueCount;
				&clearValues[0],								// const VkClearValue*     pClearValues;
			};
			const VkDeviceSize vertexBufferOffset = 0ull;

			vk.cmdBeginRenderPass(*cmdBuffer, &renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
			vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &vertexBuffer.get(), &vertexBufferOffset);
		}

		// Draw: one subpass per slice, each consuming its own 4-vertex quad from the vertex buffer
		for (deUint32 subpassNdx = 0; subpassNdx < static_cast<deUint32>(numSlices); ++subpassNdx)
		{
			if (subpassNdx != 0)
				vk.cmdNextSubpass(*cmdBuffer, VK_SUBPASS_CONTENTS_INLINE);

			vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, **pipelines[subpassNdx]);
			vk.cmdDraw(*cmdBuffer, 4u, 1u, subpassNdx*4u, 0u);
		}

		vk.cmdEndRenderPass(*cmdBuffer);

		// Copy colorImage -> host visible colorBuffer
		{
			const VkImageMemoryBarrier imageBarriers[] =
			{
				{
					VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,			// VkStructureType            sType;
					DE_NULL,										// const void*                pNext;
					VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,			// VkAccessFlags              srcAccessMask;
					VK_ACCESS_TRANSFER_READ_BIT,					// VkAccessFlags              dstAccessMask;
					VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,		// VkImageLayout              oldLayout;
					VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,			// VkImageLayout              newLayout;
					VK_QUEUE_FAMILY_IGNORED,						// deUint32                   srcQueueFamilyIndex;
					VK_QUEUE_FAMILY_IGNORED,						// deUint32                   destQueueFamilyIndex;
					*colorImage,									// VkImage                    image;
					makeColorSubresourceRange(0, imageSize.w())		// VkImageSubresourceRange    subresourceRange;
				}
			};

			vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u,
								  0u, DE_NULL, 0u, DE_NULL, DE_LENGTH_OF_ARRAY(imageBarriers), imageBarriers);

			// Copy the checked region rather than the whole image
			// (for array images the z-offset/extent live in the subresource layers instead)
			const VkImageSubresourceLayers subresource =
			{
				VK_IMAGE_ASPECT_COLOR_BIT,							// VkImageAspectFlags    aspectMask;
				0u,													// uint32_t              mipLevel;
				static_cast<deUint32>(checkOffset.w()),				// uint32_t              baseArrayLayer;
				static_cast<deUint32>(checkSize.w()),				// uint32_t              layerCount;
			};

			const VkBufferImageCopy region =
			{
				0ull,																// VkDeviceSize                bufferOffset;
				0u,																	// uint32_t                    bufferRowLength;
				0u,																	// uint32_t                    bufferImageHeight;
				subresource,														// VkImageSubresourceLayers    imageSubresource;
				makeOffset3D(checkOffset.x(), checkOffset.y(), checkOffset.z()),	// VkOffset3D                  imageOffset;
				makeExtent3D(checkSize.swizzle(0, 1, 2)),							// VkExtent3D                  imageExtent;
			};

			vk.cmdCopyImageToBuffer(*cmdBuffer, *colorImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *colorBuffer, 1u, &region);

			// Make the transfer write visible to host reads before verification
			const VkBufferMemoryBarrier bufferBarriers[] =
			{
				{
					VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,		// VkStructureType    sType;
					DE_NULL,										// const void*        pNext;
					VK_ACCESS_TRANSFER_WRITE_BIT,					// VkAccessFlags      srcAccessMask;
					VK_ACCESS_HOST_READ_BIT,						// VkAccessFlags      dstAccessMask;
					VK_QUEUE_FAMILY_IGNORED,						// uint32_t           srcQueueFamilyIndex;
					VK_QUEUE_FAMILY_IGNORED,						// uint32_t           dstQueueFamilyIndex;
					*colorBuffer,									// VkBuffer           buffer;
					0ull,											// VkDeviceSize       offset;
					VK_WHOLE_SIZE,									// VkDeviceSize       size;
				},
			};

			vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u,
								  0u, DE_NULL, DE_LENGTH_OF_ARRAY(bufferBarriers), bufferBarriers, 0u, DE_NULL);
		}

		VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
		submitCommandsAndWait(vk, device, queue, *cmdBuffer);
	}

	// Verify results
	{
		invalidateMappedMemoryRange(vk, device, colorBufferAlloc->getMemory(), colorBufferAlloc->getOffset(), VK_WHOLE_SIZE);

		const tcu::TextureFormat			format			= mapVkFormat(caseDef.colorFormat);
		const int							checkDepth		= maxLayersOrDepth(checkSize);
		const int							depthOffset		= maxLayersOrDepth(checkOffset);
		const tcu::ConstPixelBufferAccess	resultImage		(format, checkSize.x(), checkSize.y(), checkDepth, colorBufferAlloc->getHostPtr());
		tcu::TextureLevel					textureLevel	(format, checkSize.x(), checkSize.y(), checkDepth);
		const tcu::PixelBufferAccess		expectedImage	= textureLevel.getAccess();
		bool								ok				= false;

		generateExpectedImage(expectedImage, checkSize.swizzle(0, 1), depthOffset);

		if (isFloatFormat(caseDef.colorFormat))
			ok = tcu::floatThresholdCompare(context.getTestContext().getLog(), "Image Comparison", "", expectedImage, resultImage, tcu::Vec4(0.01f), tcu::COMPARE_LOG_RESULT);
		else
			ok = tcu::intThresholdCompare(context.getTestContext().getLog(), "Image Comparison", "", expectedImage, resultImage, tcu::UVec4(2), tcu::COMPARE_LOG_RESULT);

		return ok ? tcu::TestStatus::pass("Pass") : tcu::TestStatus::fail("Fail");
	}
}

//! Throw NotSupportedError if the device cannot support the requested view type.
void checkImageViewTypeRequirements (Context& context, const VkImageViewType viewType)
{
	// Rendering to slices of a 3D image requires VK_KHR_maintenance1
	if (viewType == VK_IMAGE_VIEW_TYPE_3D &&
		(!isDeviceExtensionSupported(context.getUsedApiVersion(), context.getDeviceExtensions(), "VK_KHR_maintenance1")))
		TCU_THROW(NotSupportedError, "Extension VK_KHR_maintenance1 not supported");

	if (viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY && !context.getDeviceFeatures().imageCubeArray)
		TCU_THROW(NotSupportedError, "Missing feature: imageCubeArray");
}

//! A test that can exercise very big color and depth/stencil attachment sizes.
//! If the total memory consumed by images is too large, or if the implementation returns OUT_OF_MEMORY error somewhere,
//! the test can be retried with a next increment of size reduction index, making the attachments smaller.
tcu::TestStatus testAttachmentSize (Context& context, const CaseDef caseDef)
{
	checkImageViewTypeRequirements(context, caseDef.viewType);

	if (caseDef.allocationKind == ALLOCATION_KIND_DEDICATED)
	{
		if (!isDeviceExtensionSupported(context.getUsedApiVersion(), context.getDeviceExtensions(), "VK_KHR_dedicated_allocation"))
			TCU_THROW(NotSupportedError, "VK_KHR_dedicated_allocation is not supported");
	}

	return testWithSizeReduction(context, caseDef);
	// Never reached
}

//! Produce the full mip chain for baseSize: each level halves width/height/depth (clamped at 1);
//! the layer count (w component) is left unchanged on every level.
vector<IVec4> getMipLevelSizes (IVec4 baseSize)
{
	vector<IVec4> levels;
	levels.push_back(baseSize);

	while (baseSize.x() != 1 || baseSize.y() != 1 || baseSize.z() != 1)
	{
		baseSize.x() = deMax32(baseSize.x() >> 1, 1);
		baseSize.y() = deMax32(baseSize.y() >> 1, 1);
		baseSize.z() = deMax32(baseSize.z() >> 1, 1);
		levels.push_back(baseSize);
	}

	return levels;
}

//! Compute memory consumed by each mip level, including all layers.
//! Sizes include a padding for alignment.
vector<VkDeviceSize> getPerMipLevelStorageSize (const vector<IVec4>& mipLevelSizes, const VkDeviceSize pixelSize)
{
	const deInt64			levelAlignment	= 16;	// pad each level so the next one starts 16-byte aligned
	vector<VkDeviceSize>	storageSizes;

	for (vector<IVec4>::const_iterator it = mipLevelSizes.begin(); it != mipLevelSizes.end(); ++it)
		storageSizes.push_back(deAlign64(pixelSize * product(*it), levelAlignment));

	return storageSizes;
}

//! Render a full-screen quad to every slice/layer of a single mip level of the given color
//! (and optional depth/stencil) image, one subpass per slice; submits and waits for completion.
//! The images are expected to already be in the attachment-optimal layouts (see the barriers in testRenderToMipMaps).
void drawToMipLevel (const Context&			context,
					 const CaseDef&			caseDef,
					 const int				mipLevel,
					 const IVec4&			mipSize,
					 const int				numSlices,
					 const VkImage			colorImage,
					 const VkImage			depthStencilImage,
					 const VkBuffer			vertexBuffer,
					 const VkPipelineLayout	pipelineLayout,
					 const VkShaderModule	vertexModule,
					 const VkShaderModule	fragmentModule)
{
	const DeviceInterface&			vk					= context.getDeviceInterface();
	const VkDevice					device				= context.getDevice();
	const VkQueue					queue				= context.getUniversalQueue();
	const deUint32					queueFamilyIndex	= context.getUniversalQueueFamilyIndex();
	const VkImageAspectFlags		depthStencilAspect	= getFormatAspectFlags(caseDef.depthStencilFormat);
	const bool						useDepth			= (depthStencilAspect & VK_IMAGE_ASPECT_DEPTH_BIT)   != 0;
	const bool						useStencil			= (depthStencilAspect & VK_IMAGE_ASPECT_STENCIL_BIT) != 0;
	const Unique<VkRenderPass>		renderPass			(makeRenderPass(vk, device, caseDef.colorFormat, caseDef.depthStencilFormat, static_cast<deUint32>(numSlices),
																		VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
																		VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL));
	vector<SharedPtrVkPipeline>		pipelines;
	vector<SharedPtrVkImageView>	colorAttachments;
	vector<SharedPtrVkImageView>	depthStencilAttachments;
	vector<VkImageView>				attachmentHandles;			// all attachments (color and d/s)

	// For each image layer or slice (3D), create an attachment and a pipeline
	{
		VkPipeline basePipeline = DE_NULL;

		// Color attachments are first in the framebuffer
		for (int subpassNdx = 0; subpassNdx < numSlices; ++subpassNdx)
		{
			colorAttachments.push_back(makeSharedPtr(makeImageView(
				vk, device, colorImage, getImageViewSliceType(caseDef.viewType), caseDef.colorFormat,
				makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 1u, subpassNdx, 1u))));
			attachmentHandles.push_back(**colorAttachments.back());

			// We also have to create pipelines for each subpass
			pipelines.push_back(makeSharedPtr(makeGraphicsPipeline(
				vk, device, basePipeline, pipelineLayout, *renderPass, vertexModule, fragmentModule, mipSize.swizzle(0, 1), VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
				static_cast<deUint32>(subpassNdx), useDepth, useStencil)));

			// All subsequent pipelines are derived from the first one
			basePipeline = **pipelines.front();
		}

		// Then D/S attachments, if any
		if (useDepth || useStencil)
			for (int subpassNdx = 0; subpassNdx < numSlices; ++subpassNdx)
			{
				depthStencilAttachments.push_back(makeSharedPtr(makeImageView(
					vk, device, depthStencilImage, VK_IMAGE_VIEW_TYPE_2D, caseDef.depthStencilFormat,
					makeImageSubresourceRange(depthStencilAspect, mipLevel, 1u, subpassNdx, 1u))));
				attachmentHandles.push_back(**depthStencilAttachments.back());
			}
	}

	const Unique<VkFramebuffer> framebuffer (makeFramebuffer(vk, device, *renderPass, static_cast<deUint32>(attachmentHandles.size()), &attachmentHandles[0],
															 static_cast<deUint32>(mipSize.x()), static_cast<deUint32>(mipSize.y())));

	{
		const Unique<VkCommandPool>		cmdPool		(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
		const Unique<VkCommandBuffer>	cmdBuffer	(makeCommandBuffer(vk, device, *cmdPool));

		beginCommandBuffer(vk, *cmdBuffer);
		{
			// One clear value per color attachment, followed by one per D/S attachment (if present)
			vector<VkClearValue> clearValues (numSlices, getClearValue(caseDef.colorFormat));

			if (useDepth || useStencil)
				clearValues.insert(clearValues.end(), numSlices, makeClearValueDepthStencil(REFERENCE_DEPTH_VALUE, REFERENCE_STENCIL_VALUE));

			const VkRect2D renderArea =
			{
				makeOffset2D(0, 0),
				makeExtent2D(mipSize.x(), mipSize.y()),
			};
			const VkRenderPassBeginInfo renderPassBeginInfo =
			{
				VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,		// VkStructureType         sType;
				DE_NULL,										// const void*             pNext;
				*renderPass,									// VkRenderPass            renderPass;
				*framebuffer,									// VkFramebuffer           framebuffer;
				renderArea,										// VkRect2D                renderArea;
				static_cast<deUint32>(clearValues.size()),		// uint32_t                clearValueCount;
				&clearValues[0],								// const VkClearValue*     pClearValues;
			};
			const VkDeviceSize vertexBufferOffset = 0ull;

			vk.cmdBeginRenderPass(*cmdBuffer, &renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
			vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &vertexBuffer, &vertexBufferOffset);
		}

		// Draw: one subpass per slice, each consuming its own 4-vertex quad from the vertex buffer
		for (deUint32 subpassNdx = 0; subpassNdx < static_cast<deUint32>(numSlices); ++subpassNdx)
		{
			if (subpassNdx != 0)
				vk.cmdNextSubpass(*cmdBuffer, VK_SUBPASS_CONTENTS_INLINE);

			vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, **pipelines[subpassNdx]);
			vk.cmdDraw(*cmdBuffer, 4u, 1u, subpassNdx*4u, 0u);
		}

		vk.cmdEndRenderPass(*cmdBuffer);

		VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
		submitCommandsAndWait(vk, device, queue, *cmdBuffer);
	}
}
Use image mip levels as attachments 1410 tcu::TestStatus testRenderToMipMaps (Context& context, const CaseDef caseDef) 1411 { 1412 checkImageViewTypeRequirements(context, caseDef.viewType); 1413 1414 const DeviceInterface& vk = context.getDeviceInterface(); 1415 const InstanceInterface& vki = context.getInstanceInterface(); 1416 const VkDevice device = context.getDevice(); 1417 const VkPhysicalDevice physDevice = context.getPhysicalDevice(); 1418 const VkQueue queue = context.getUniversalQueue(); 1419 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex(); 1420 Allocator& allocator = context.getDefaultAllocator(); 1421 1422 const IVec4 imageSize = caseDef.imageSizeHint; // MAX_SIZE is not used in this test 1423 const deInt32 numSlices = maxLayersOrDepth(imageSize); 1424 const vector<IVec4> mipLevelSizes = getMipLevelSizes(imageSize); 1425 const vector<VkDeviceSize> mipLevelStorageSizes = getPerMipLevelStorageSize(mipLevelSizes, tcu::getPixelSize(mapVkFormat(caseDef.colorFormat))); 1426 const int numMipLevels = static_cast<int>(mipLevelSizes.size()); 1427 const bool useDepthStencil = (caseDef.depthStencilFormat != VK_FORMAT_UNDEFINED); 1428 1429 if (caseDef.allocationKind == ALLOCATION_KIND_DEDICATED) 1430 { 1431 if (!isDeviceExtensionSupported(context.getUsedApiVersion(), context.getDeviceExtensions(), "VK_KHR_dedicated_allocation")) 1432 TCU_THROW(NotSupportedError, "VK_KHR_dedicated_allocation is not supported"); 1433 } 1434 1435 if (useDepthStencil && !isDepthStencilFormatSupported(vki, physDevice, caseDef.depthStencilFormat)) 1436 TCU_THROW(NotSupportedError, "Unsupported depth/stencil format"); 1437 1438 // Create a color buffer big enough to hold all layers and mip levels 1439 const VkDeviceSize colorBufferSize = sum(mipLevelStorageSizes); 1440 const Unique<VkBuffer> colorBuffer (makeBuffer(vk, device, colorBufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT)); 1441 const UniquePtr<Allocation> colorBufferAlloc (bindBuffer(vki, vk, physDevice, 
device, *colorBuffer, MemoryRequirement::HostVisible, allocator, caseDef.allocationKind)); 1442 1443 { 1444 deMemset(colorBufferAlloc->getHostPtr(), 0, static_cast<std::size_t>(colorBufferSize)); 1445 flushMappedMemoryRange(vk, device, colorBufferAlloc->getMemory(), colorBufferAlloc->getOffset(), VK_WHOLE_SIZE); 1446 } 1447 1448 const Unique<VkShaderModule> vertexModule (createShaderModule (vk, device, context.getBinaryCollection().get("vert"), 0u)); 1449 const Unique<VkShaderModule> fragmentModule (createShaderModule (vk, device, context.getBinaryCollection().get("frag"), 0u)); 1450 const Unique<VkPipelineLayout> pipelineLayout (makePipelineLayout (vk, device)); 1451 1452 Move<VkImage> colorImage; 1453 MovePtr<Allocation> colorImageAlloc; 1454 Move<VkImage> depthStencilImage; 1455 MovePtr<Allocation> depthStencilImageAlloc; 1456 Move<VkBuffer> vertexBuffer; 1457 MovePtr<Allocation> vertexBufferAlloc; 1458 1459 // Create a color image 1460 { 1461 const VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT; 1462 1463 colorImage = makeImage(vk, device, getImageCreateFlags(caseDef.viewType), getImageType(caseDef.viewType), caseDef.colorFormat, 1464 imageSize.swizzle(0, 1, 2), numMipLevels, imageSize.w(), imageUsage); 1465 colorImageAlloc = bindImage(vki, vk, physDevice, device, *colorImage, MemoryRequirement::Any, allocator, caseDef.allocationKind); 1466 } 1467 1468 // Create a depth/stencil image (always a 2D image, optionally layered) 1469 if (useDepthStencil) 1470 { 1471 const VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT; 1472 1473 depthStencilImage = makeImage(vk, device, (VkImageCreateFlags)0, VK_IMAGE_TYPE_2D, caseDef.depthStencilFormat, 1474 IVec3(imageSize.x(), imageSize.y(), 1), numMipLevels, numSlices, imageUsage); 1475 depthStencilImageAlloc = bindImage(vki, vk, physDevice, device, *depthStencilImage, MemoryRequirement::Any, allocator, caseDef.allocationKind); 1476 } 1477 1478 // 
Create a vertex buffer 1479 { 1480 const vector<Vertex4RGBA> vertices = genFullQuadVertices(numSlices); 1481 const VkDeviceSize vertexBufferSize = sizeInBytes(vertices); 1482 1483 vertexBuffer = makeBuffer(vk, device, vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT); 1484 vertexBufferAlloc = bindBuffer(vki, vk, physDevice, device, *vertexBuffer, MemoryRequirement::HostVisible, allocator, caseDef.allocationKind); 1485 1486 deMemcpy(vertexBufferAlloc->getHostPtr(), &vertices[0], static_cast<std::size_t>(vertexBufferSize)); 1487 flushMappedMemoryRange(vk, device, vertexBufferAlloc->getMemory(), vertexBufferAlloc->getOffset(), vertexBufferSize); 1488 } 1489 1490 // Prepare images 1491 { 1492 const Unique<VkCommandPool> cmdPool (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex)); 1493 const Unique<VkCommandBuffer> cmdBuffer (makeCommandBuffer(vk, device, *cmdPool)); 1494 1495 beginCommandBuffer(vk, *cmdBuffer); 1496 1497 const VkImageMemoryBarrier imageBarriers[] = 1498 { 1499 { 1500 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType; 1501 DE_NULL, // const void* pNext; 1502 (VkAccessFlags)0, // VkAccessFlags srcAccessMask; 1503 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags dstAccessMask; 1504 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout; 1505 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout newLayout; 1506 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex; 1507 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex; 1508 *colorImage, // VkImage image; 1509 { // VkImageSubresourceRange subresourceRange; 1510 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask; 1511 0u, // uint32_t baseMipLevel; 1512 static_cast<deUint32>(numMipLevels), // uint32_t levelCount; 1513 0u, // uint32_t baseArrayLayer; 1514 static_cast<deUint32>(imageSize.w()), // uint32_t layerCount; 1515 }, 1516 }, 1517 { 1518 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType; 1519 
DE_NULL, // const void* pNext; 1520 (VkAccessFlags)0, // VkAccessFlags srcAccessMask; 1521 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, // VkAccessFlags dstAccessMask; 1522 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout; 1523 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, // VkImageLayout newLayout; 1524 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex; 1525 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex; 1526 *depthStencilImage, // VkImage image; 1527 { // VkImageSubresourceRange subresourceRange; 1528 getFormatAspectFlags(caseDef.depthStencilFormat), // VkImageAspectFlags aspectMask; 1529 0u, // uint32_t baseMipLevel; 1530 static_cast<deUint32>(numMipLevels), // uint32_t levelCount; 1531 0u, // uint32_t baseArrayLayer; 1532 static_cast<deUint32>(numSlices), // uint32_t layerCount; 1533 }, 1534 } 1535 }; 1536 1537 const deUint32 numImageBarriers = static_cast<deUint32>(DE_LENGTH_OF_ARRAY(imageBarriers) - (useDepthStencil ? 0 : 1)); 1538 1539 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0u, 1540 0u, DE_NULL, 0u, DE_NULL, numImageBarriers, imageBarriers); 1541 1542 VK_CHECK(vk.endCommandBuffer(*cmdBuffer)); 1543 submitCommandsAndWait(vk, device, queue, *cmdBuffer); 1544 } 1545 1546 // Draw 1547 for (int mipLevel = 0; mipLevel < numMipLevels; ++mipLevel) 1548 { 1549 const IVec4& mipSize = mipLevelSizes[mipLevel]; 1550 const int levelSlices = maxLayersOrDepth(mipSize); 1551 1552 drawToMipLevel (context, caseDef, mipLevel, mipSize, levelSlices, *colorImage, *depthStencilImage, *vertexBuffer, *pipelineLayout, 1553 *vertexModule, *fragmentModule); 1554 } 1555 1556 // Copy results: colorImage -> host visible colorBuffer 1557 { 1558 const Unique<VkCommandPool> cmdPool (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex)); 1559 const Unique<VkCommandBuffer> cmdBuffer (makeCommandBuffer(vk, device, *cmdPool)); 1560 1561 
beginCommandBuffer(vk, *cmdBuffer); 1562 1563 { 1564 const VkImageMemoryBarrier imageBarriers[] = 1565 { 1566 { 1567 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType; 1568 DE_NULL, // const void* pNext; 1569 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags srcAccessMask; 1570 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask; 1571 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout oldLayout; 1572 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // VkImageLayout newLayout; 1573 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex; 1574 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex; 1575 *colorImage, // VkImage image; 1576 { // VkImageSubresourceRange subresourceRange; 1577 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask; 1578 0u, // uint32_t baseMipLevel; 1579 static_cast<deUint32>(numMipLevels), // uint32_t levelCount; 1580 0u, // uint32_t baseArrayLayer; 1581 static_cast<deUint32>(imageSize.w()), // uint32_t layerCount; 1582 }, 1583 } 1584 }; 1585 1586 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 1587 0u, DE_NULL, 0u, DE_NULL, DE_LENGTH_OF_ARRAY(imageBarriers), imageBarriers); 1588 } 1589 { 1590 vector<VkBufferImageCopy> regions; 1591 VkDeviceSize levelOffset = 0ull; 1592 VkBufferImageCopy workRegion = 1593 { 1594 0ull, // VkDeviceSize bufferOffset; 1595 0u, // uint32_t bufferRowLength; 1596 0u, // uint32_t bufferImageHeight; 1597 makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, imageSize.w()), // VkImageSubresourceLayers imageSubresource; 1598 makeOffset3D(0, 0, 0), // VkOffset3D imageOffset; 1599 makeExtent3D(0, 0, 0), // VkExtent3D imageExtent; 1600 }; 1601 1602 for (int mipLevel = 0; mipLevel < numMipLevels; ++mipLevel) 1603 { 1604 workRegion.bufferOffset = levelOffset; 1605 workRegion.imageSubresource.mipLevel = static_cast<deUint32>(mipLevel); 1606 workRegion.imageExtent = makeExtent3D(mipLevelSizes[mipLevel].swizzle(0, 
1, 2)); 1607 1608 regions.push_back(workRegion); 1609 1610 levelOffset += mipLevelStorageSizes[mipLevel]; 1611 } 1612 1613 vk.cmdCopyImageToBuffer(*cmdBuffer, *colorImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *colorBuffer, static_cast<deUint32>(regions.size()), &regions[0]); 1614 } 1615 { 1616 const VkBufferMemoryBarrier bufferBarriers[] = 1617 { 1618 { 1619 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType; 1620 DE_NULL, // const void* pNext; 1621 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask; 1622 VK_ACCESS_HOST_READ_BIT, // VkAccessFlags dstAccessMask; 1623 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex; 1624 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex; 1625 *colorBuffer, // VkBuffer buffer; 1626 0ull, // VkDeviceSize offset; 1627 VK_WHOLE_SIZE, // VkDeviceSize size; 1628 }, 1629 }; 1630 1631 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 1632 0u, DE_NULL, DE_LENGTH_OF_ARRAY(bufferBarriers), bufferBarriers, 0u, DE_NULL); 1633 } 1634 1635 VK_CHECK(vk.endCommandBuffer(*cmdBuffer)); 1636 submitCommandsAndWait(vk, device, queue, *cmdBuffer); 1637 } 1638 1639 // Verify results (per mip level) 1640 { 1641 invalidateMappedMemoryRange(vk, device, colorBufferAlloc->getMemory(), colorBufferAlloc->getOffset(), VK_WHOLE_SIZE); 1642 1643 const tcu::TextureFormat format = mapVkFormat(caseDef.colorFormat); 1644 1645 VkDeviceSize levelOffset = 0ull; 1646 bool allOk = true; 1647 1648 for (int mipLevel = 0; mipLevel < numMipLevels; ++mipLevel) 1649 { 1650 const IVec4& mipSize = mipLevelSizes[mipLevel]; 1651 const void* const pLevelData = static_cast<const deUint8*>(colorBufferAlloc->getHostPtr()) + levelOffset; 1652 const int levelDepth = maxLayersOrDepth(mipSize); 1653 const tcu::ConstPixelBufferAccess resultImage (format, mipSize.x(), mipSize.y(), levelDepth, pLevelData); 1654 tcu::TextureLevel textureLevel (format, mipSize.x(), mipSize.y(), levelDepth); 1655 const 
tcu::PixelBufferAccess expectedImage = textureLevel.getAccess(); 1656 const std::string comparisonName = "Mip level " + de::toString(mipLevel); 1657 bool ok = false; 1658 1659 generateExpectedImage(expectedImage, mipSize.swizzle(0, 1), 0); 1660 1661 if (isFloatFormat(caseDef.colorFormat)) 1662 ok = tcu::floatThresholdCompare(context.getTestContext().getLog(), "Image Comparison", comparisonName.c_str(), expectedImage, resultImage, tcu::Vec4(0.01f), tcu::COMPARE_LOG_RESULT); 1663 else 1664 ok = tcu::intThresholdCompare(context.getTestContext().getLog(), "Image Comparison", comparisonName.c_str(), expectedImage, resultImage, tcu::UVec4(2), tcu::COMPARE_LOG_RESULT); 1665 1666 allOk = allOk && ok; // keep testing all levels, even if we know it's a fail overall 1667 levelOffset += mipLevelStorageSizes[mipLevel]; 1668 } 1669 1670 return allOk ? tcu::TestStatus::pass("Pass") : tcu::TestStatus::fail("Fail"); 1671 } 1672 } 1673 1674 std::string getSizeDescription (const IVec4& size) 1675 { 1676 std::ostringstream str; 1677 1678 const char* const description[4] = 1679 { 1680 "width", "height", "depth", "layers" 1681 }; 1682 1683 int numMaxComponents = 0; 1684 1685 for (int i = 0; i < 4; ++i) 1686 { 1687 if (size[i] == MAX_SIZE) 1688 { 1689 if (numMaxComponents > 0) 1690 str << "_"; 1691 1692 str << description[i]; 1693 ++numMaxComponents; 1694 } 1695 } 1696 1697 if (numMaxComponents == 0) 1698 str << "small"; 1699 1700 return str.str(); 1701 } 1702 1703 inline std::string getFormatString (const VkFormat format) 1704 { 1705 std::string name(getFormatName(format)); 1706 return de::toLower(name.substr(10)); 1707 } 1708 1709 std::string getFormatString (const VkFormat colorFormat, const VkFormat depthStencilFormat) 1710 { 1711 std::ostringstream str; 1712 str << getFormatString(colorFormat); 1713 if (depthStencilFormat != VK_FORMAT_UNDEFINED) 1714 str << "_" << getFormatString(depthStencilFormat); 1715 return str.str(); 1716 } 1717 1718 std::string getShortImageViewTypeName 
(const VkImageViewType imageViewType) 1719 { 1720 std::string s(getImageViewTypeName(imageViewType)); 1721 return de::toLower(s.substr(19)); 1722 } 1723 1724 inline BVec4 bvecFromMask (deUint32 mask) 1725 { 1726 return BVec4((mask >> 0) & 1, 1727 (mask >> 1) & 1, 1728 (mask >> 2) & 1, 1729 (mask >> 3) & 1); 1730 } 1731 1732 vector<IVec4> genSizeCombinations (const IVec4& baselineSize, const deUint32 sizeMask, const VkImageViewType imageViewType) 1733 { 1734 vector<IVec4> sizes; 1735 std::set<deUint32> masks; 1736 1737 for (deUint32 i = 0; i < (1u << 4); ++i) 1738 { 1739 // Cube images have square faces 1740 if (isCube(imageViewType) && ((i & MASK_WH) != 0)) 1741 i |= MASK_WH; 1742 1743 masks.insert(i & sizeMask); 1744 } 1745 1746 for (std::set<deUint32>::const_iterator it = masks.begin(); it != masks.end(); ++it) 1747 sizes.push_back(tcu::select(IVec4(MAX_SIZE), baselineSize, bvecFromMask(*it))); 1748 1749 return sizes; 1750 } 1751 1752 void addTestCasesWithFunctions (tcu::TestCaseGroup* group, AllocationKind allocationKind) 1753 { 1754 const struct 1755 { 1756 VkImageViewType viewType; 1757 IVec4 baselineSize; //!< image size: (dimX, dimY, dimZ, arraySize) 1758 deUint32 sizeMask; //!< if a dimension is masked, generate a huge size case for it 1759 } testCase[] = 1760 { 1761 { VK_IMAGE_VIEW_TYPE_1D, IVec4(54, 1, 1, 1), MASK_W }, 1762 { VK_IMAGE_VIEW_TYPE_1D_ARRAY, IVec4(54, 1, 1, 4), MASK_W_LAYERS }, 1763 { VK_IMAGE_VIEW_TYPE_2D, IVec4(44, 23, 1, 1), MASK_WH }, 1764 { VK_IMAGE_VIEW_TYPE_2D_ARRAY, IVec4(44, 23, 1, 4), MASK_WH_LAYERS }, 1765 { VK_IMAGE_VIEW_TYPE_3D, IVec4(22, 31, 7, 1), MASK_WHD }, 1766 { VK_IMAGE_VIEW_TYPE_CUBE, IVec4(35, 35, 1, 6), MASK_WH }, 1767 { VK_IMAGE_VIEW_TYPE_CUBE_ARRAY, IVec4(35, 35, 1, 2*6), MASK_WH_LAYERS }, 1768 }; 1769 1770 const VkFormat format[] = 1771 { 1772 VK_FORMAT_R8G8B8A8_UNORM, 1773 VK_FORMAT_R32_UINT, 1774 VK_FORMAT_R16G16_SINT, 1775 VK_FORMAT_R32G32B32A32_SFLOAT, 1776 }; 1777 1778 const VkFormat depthStencilFormat[] = 1779 
{ 1780 VK_FORMAT_UNDEFINED, // don't use a depth/stencil attachment 1781 VK_FORMAT_D16_UNORM, 1782 VK_FORMAT_S8_UINT, 1783 VK_FORMAT_D24_UNORM_S8_UINT, // one of the following mixed formats must be supported 1784 VK_FORMAT_D32_SFLOAT_S8_UINT, 1785 }; 1786 1787 for (int caseNdx = 0; caseNdx < DE_LENGTH_OF_ARRAY(testCase); ++caseNdx) 1788 { 1789 MovePtr<tcu::TestCaseGroup> imageGroup(new tcu::TestCaseGroup(group->getTestContext(), getShortImageViewTypeName(testCase[caseNdx].viewType).c_str(), "")); 1790 1791 // Generate attachment size cases 1792 { 1793 const vector<IVec4> sizes = genSizeCombinations(testCase[caseNdx].baselineSize, testCase[caseNdx].sizeMask, testCase[caseNdx].viewType); 1794 1795 MovePtr<tcu::TestCaseGroup> smallGroup(new tcu::TestCaseGroup(group->getTestContext(), "small", "")); 1796 MovePtr<tcu::TestCaseGroup> hugeGroup (new tcu::TestCaseGroup(group->getTestContext(), "huge", "")); 1797 1798 imageGroup->addChild(smallGroup.get()); 1799 imageGroup->addChild(hugeGroup.get()); 1800 1801 for (vector<IVec4>::const_iterator sizeIter = sizes.begin(); sizeIter != sizes.end(); ++sizeIter) 1802 { 1803 // The first size is the baseline size, put it in a dedicated group 1804 if (sizeIter == sizes.begin()) 1805 { 1806 for (int dsFormatNdx = 0; dsFormatNdx < DE_LENGTH_OF_ARRAY(depthStencilFormat); ++dsFormatNdx) 1807 for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(format); ++formatNdx) 1808 { 1809 const CaseDef caseDef = 1810 { 1811 testCase[caseNdx].viewType, // VkImageViewType imageType; 1812 *sizeIter, // IVec4 imageSizeHint; 1813 format[formatNdx], // VkFormat colorFormat; 1814 depthStencilFormat[dsFormatNdx], // VkFormat depthStencilFormat; 1815 allocationKind // AllocationKind allocationKind; 1816 }; 1817 addFunctionCaseWithPrograms(smallGroup.get(), getFormatString(format[formatNdx], depthStencilFormat[dsFormatNdx]), "", initPrograms, testAttachmentSize, caseDef); 1818 } 1819 } 1820 else // All huge cases go into a separate group 1821 { 1822 if 
(allocationKind != ALLOCATION_KIND_DEDICATED) 1823 { 1824 MovePtr<tcu::TestCaseGroup> sizeGroup (new tcu::TestCaseGroup(group->getTestContext(), getSizeDescription(*sizeIter).c_str(), "")); 1825 const VkFormat colorFormat = VK_FORMAT_R8G8B8A8_UNORM; 1826 1827 // Use the same color format for all cases, to reduce the number of permutations 1828 for (int dsFormatNdx = 0; dsFormatNdx < DE_LENGTH_OF_ARRAY(depthStencilFormat); ++dsFormatNdx) 1829 { 1830 const CaseDef caseDef = 1831 { 1832 testCase[caseNdx].viewType, // VkImageViewType viewType; 1833 *sizeIter, // IVec4 imageSizeHint; 1834 colorFormat, // VkFormat colorFormat; 1835 depthStencilFormat[dsFormatNdx], // VkFormat depthStencilFormat; 1836 allocationKind // AllocationKind allocationKind; 1837 }; 1838 addFunctionCaseWithPrograms(sizeGroup.get(), getFormatString(colorFormat, depthStencilFormat[dsFormatNdx]), "", initPrograms, testAttachmentSize, caseDef); 1839 } 1840 hugeGroup->addChild(sizeGroup.release()); 1841 } 1842 } 1843 } 1844 smallGroup.release(); 1845 hugeGroup.release(); 1846 } 1847 1848 // Generate mip map cases 1849 { 1850 MovePtr<tcu::TestCaseGroup> mipmapGroup(new tcu::TestCaseGroup(group->getTestContext(), "mipmap", "")); 1851 1852 for (int dsFormatNdx = 0; dsFormatNdx < DE_LENGTH_OF_ARRAY(depthStencilFormat); ++dsFormatNdx) 1853 for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(format); ++formatNdx) 1854 { 1855 const CaseDef caseDef = 1856 { 1857 testCase[caseNdx].viewType, // VkImageViewType imageType; 1858 testCase[caseNdx].baselineSize, // IVec4 imageSizeHint; 1859 format[formatNdx], // VkFormat colorFormat; 1860 depthStencilFormat[dsFormatNdx], // VkFormat depthStencilFormat; 1861 allocationKind // AllocationKind allocationKind; 1862 }; 1863 addFunctionCaseWithPrograms(mipmapGroup.get(), getFormatString(format[formatNdx], depthStencilFormat[dsFormatNdx]), "", initPrograms, testRenderToMipMaps, caseDef); 1864 } 1865 imageGroup->addChild(mipmapGroup.release()); 1866 } 1867 1868 
group->addChild(imageGroup.release()); 1869 } 1870 } 1871 1872 void addCoreRenderToImageTests (tcu::TestCaseGroup* group) 1873 { 1874 addTestCasesWithFunctions(group, ALLOCATION_KIND_SUBALLOCATED); 1875 } 1876 1877 void addDedicatedAllocationRenderToImageTests (tcu::TestCaseGroup* group) 1878 { 1879 addTestCasesWithFunctions(group, ALLOCATION_KIND_DEDICATED); 1880 } 1881 1882 } // anonymous ns 1883 1884 tcu::TestCaseGroup* createRenderToImageTests (tcu::TestContext& testCtx) 1885 { 1886 de::MovePtr<tcu::TestCaseGroup> renderToImageTests (new tcu::TestCaseGroup(testCtx, "render_to_image", "Render to image tests")); 1887 1888 renderToImageTests->addChild(createTestGroup(testCtx, "core", "Core render to image tests", addCoreRenderToImageTests)); 1889 renderToImageTests->addChild(createTestGroup(testCtx, "dedicated_allocation", "Render to image tests for dedicated memory allocation", addDedicatedAllocationRenderToImageTests)); 1890 1891 return renderToImageTests.release(); 1892 } 1893 1894 } // pipeline 1895 } // vkt 1896