1 /*------------------------------------------------------------------------ 2 * Vulkan Conformance Tests 3 * ------------------------ 4 * 5 * Copyright (c) 2017 The Khronos Group Inc. 6 * Copyright (c) 2017 Samsung Electronics Co., Ltd. 7 * 8 * Licensed under the Apache License, Version 2.0 (the "License"); 9 * you may not use this file except in compliance with the License. 10 * You may obtain a copy of the License at 11 * 12 * http://www.apache.org/licenses/LICENSE-2.0 13 * 14 * Unless required by applicable law or agreed to in writing, software 15 * distributed under the License is distributed on an "AS IS" BASIS, 16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 * See the License for the specific language governing permissions and 18 * limitations under the License. 19 * 20 *//*! 21 * \file 22 * \brief Protected memory storage buffer tests 23 *//*--------------------------------------------------------------------*/ 24 25 #include "vktProtectedMemStorageBufferTests.hpp" 26 27 #include "deRandom.hpp" 28 #include "deStringUtil.hpp" 29 #include "tcuTestLog.hpp" 30 #include "tcuVector.hpp" 31 #include "tcuStringTemplate.hpp" 32 33 #include "vkPrograms.hpp" 34 #include "vktTestCase.hpp" 35 #include "vktTestGroupUtil.hpp" 36 #include "vkTypeUtil.hpp" 37 #include "vkBuilderUtil.hpp" 38 39 #include "vktProtectedMemBufferValidator.hpp" 40 #include "vktProtectedMemUtils.hpp" 41 #include "vktProtectedMemContext.hpp" 42 43 namespace vkt 44 { 45 namespace ProtectedMem 46 { 47 48 namespace 49 { 50 51 enum { 52 RENDER_HEIGHT = 128, 53 RENDER_WIDTH = 128, 54 }; 55 56 enum { 57 RANDOM_TEST_COUNT = 10, 58 }; 59 60 enum SSBOTestType { 61 SSBO_READ, 62 SSBO_WRITE, 63 SSBO_ATOMIC 64 }; 65 66 enum SSBOAtomicType { 67 ATOMIC_ADD, 68 ATOMIC_MIN, 69 ATOMIC_MAX, 70 ATOMIC_AND, 71 ATOMIC_OR, 72 ATOMIC_XOR, 73 ATOMIC_EXCHANGE, 74 ATOMIC_COMPSWAP 75 }; 76 77 78 const char* getSSBOTestDescription (SSBOTestType type) 79 { 80 switch (type) { 81 case SSBO_READ: 
return "Test for read storage buffer on protected memory."; 82 case SSBO_WRITE: return "Test for write storage buffer on protected memory."; 83 case SSBO_ATOMIC: return "Test for atomic storage buffer on protected memory."; 84 default: DE_FATAL("Invalid SSBO test type"); return ""; 85 } 86 } 87 88 const char* getSSBOTypeString (SSBOTestType type) 89 { 90 switch (type) { 91 case SSBO_READ: return "read"; 92 case SSBO_WRITE: return "write"; 93 case SSBO_ATOMIC: return "atomic"; 94 default: DE_FATAL("Invalid SSBO test type"); return ""; 95 } 96 } 97 98 const char* getShaderTypeString (const glu::ShaderType shaderType) 99 { 100 switch (shaderType) { 101 case glu::SHADERTYPE_FRAGMENT: return "fragment"; 102 case glu::SHADERTYPE_COMPUTE: return "compute"; 103 default: DE_FATAL("Invalid shader type"); return ""; 104 } 105 } 106 107 const char* getSSBOAtomicTypeString (SSBOAtomicType type) 108 { 109 switch (type) 110 { 111 case ATOMIC_ADD: return "add"; 112 case ATOMIC_MIN: return "min"; 113 case ATOMIC_MAX: return "max"; 114 case ATOMIC_AND: return "and"; 115 case ATOMIC_OR: return "or"; 116 case ATOMIC_XOR: return "xor"; 117 case ATOMIC_EXCHANGE: return "exchange"; 118 case ATOMIC_COMPSWAP: return "compswap"; 119 default: DE_FATAL("Invalid SSBO atomic operation type"); return ""; 120 } 121 } 122 123 void static addBufferCopyCmd (const vk::DeviceInterface& vk, 124 vk::VkCommandBuffer cmdBuffer, 125 deUint32 queueFamilyIndex, 126 vk::VkBuffer srcBuffer, 127 vk::VkBuffer dstBuffer, 128 deUint32 copySize) 129 { 130 const vk::VkBufferMemoryBarrier dstWriteStartBarrier = 131 { 132 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType 133 DE_NULL, // const void* pNext 134 0, // VkAccessFlags srcAccessMask 135 vk::VK_ACCESS_SHADER_WRITE_BIT, // VkAccessFlags dstAccessMask 136 queueFamilyIndex, // uint32_t srcQueueFamilyIndex 137 queueFamilyIndex, // uint32_t dstQueueFamilyIndex 138 dstBuffer, // VkBuffer buffer 139 0u, // VkDeviceSize offset 140 VK_WHOLE_SIZE, // 
VkDeviceSize size 141 }; 142 vk.cmdPipelineBarrier(cmdBuffer, 143 vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 144 vk::VK_PIPELINE_STAGE_TRANSFER_BIT, 145 (vk::VkDependencyFlags)0, 146 0, (const vk::VkMemoryBarrier*)DE_NULL, 147 1, &dstWriteStartBarrier, 148 0, (const vk::VkImageMemoryBarrier*)DE_NULL); 149 150 const vk::VkBufferCopy copyRegion = 151 { 152 0, // VkDeviceSize srcOffset 153 0, // VkDeviceSize dstOffset 154 copySize // VkDeviceSize size 155 }; 156 vk.cmdCopyBuffer(cmdBuffer, srcBuffer, dstBuffer, 1, ©Region); 157 158 const vk::VkBufferMemoryBarrier dstWriteEndBarrier = 159 { 160 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType 161 DE_NULL, // const void* pNext 162 vk::VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask 163 vk::VK_ACCESS_SHADER_READ_BIT, // VkAccessFlags dstAccessMask 164 queueFamilyIndex, // uint32_t srcQueueFamilyIndex 165 queueFamilyIndex, // uint32_t dstQueueFamilyIndex 166 dstBuffer, // VkBuffer buffer 167 0u, // VkDeviceSize offset 168 VK_WHOLE_SIZE, // VkDeviceSize size 169 }; 170 vk.cmdPipelineBarrier(cmdBuffer, 171 vk::VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 172 vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 173 (vk::VkDependencyFlags)0, 174 0, (const vk::VkMemoryBarrier*)DE_NULL, 175 1, &dstWriteEndBarrier, 176 0, (const vk::VkImageMemoryBarrier*)DE_NULL); 177 178 } 179 180 template<typename T> 181 class StorageBufferTestInstance : public ProtectedTestInstance 182 { 183 public: 184 StorageBufferTestInstance (Context& ctx, 185 const SSBOTestType testType, 186 const glu::ShaderType shaderType, 187 const tcu::UVec4 testInput, 188 const BufferValidator<T>& validator); 189 virtual tcu::TestStatus iterate (void); 190 191 private: 192 tcu::TestStatus executeFragmentTest (void); 193 tcu::TestStatus executeComputeTest (void); 194 195 const SSBOTestType m_testType; 196 const glu::ShaderType m_shaderType; 197 const tcu::UVec4 m_testInput; 198 const BufferValidator<T>& m_validator; 199 const vk::VkFormat m_imageFormat; 200 
}; 201 202 template<typename T> 203 class StorageBufferTestCase : public TestCase 204 { 205 public: 206 StorageBufferTestCase (tcu::TestContext& testctx, 207 const SSBOTestType testType, 208 const glu::ShaderType shaderType, 209 const char* name, 210 const tcu::UVec4 testInput, 211 ValidationDataStorage<T> validationData, 212 const std::string& extraShader = "") 213 : TestCase (testctx, name, getSSBOTestDescription(testType)) 214 , m_testType (testType) 215 , m_shaderType (shaderType) 216 , m_testInput (testInput) 217 , m_validator (validationData) 218 , m_extraShader (extraShader) 219 { 220 } 221 virtual TestInstance* createInstance (Context& ctx) const 222 { 223 return new StorageBufferTestInstance<T>(ctx, m_testType, m_shaderType, m_testInput, m_validator); 224 } 225 virtual void initPrograms (vk::SourceCollections& programCollection) const; 226 227 virtual ~StorageBufferTestCase (void) {} 228 229 private: 230 const SSBOTestType m_testType; 231 const glu::ShaderType m_shaderType; 232 const tcu::UVec4 m_testInput; 233 const BufferValidator<T> m_validator; 234 const std::string m_extraShader; 235 }; 236 237 template<typename T> 238 StorageBufferTestInstance<T>::StorageBufferTestInstance (Context& ctx, 239 const SSBOTestType testType, 240 const glu::ShaderType shaderType, 241 const tcu::UVec4 testInput, 242 const BufferValidator<T>& validator) 243 : ProtectedTestInstance (ctx) 244 , m_testType (testType) 245 , m_shaderType (shaderType) 246 , m_testInput (testInput) 247 , m_validator (validator) 248 , m_imageFormat (vk::VK_FORMAT_R8G8B8A8_UNORM) 249 { 250 } 251 252 template<typename T> 253 void StorageBufferTestCase<T>::initPrograms (vk::SourceCollections& programCollection) const 254 { 255 const char* vertexShader = 256 "#version 450\n" 257 "layout(location=0) out vec4 vIndex;\n" 258 "void main() {\n" 259 " vec2 pos[4] = vec2[4]( vec2(-0.7, 0.7), vec2(0.7, 0.7), vec2(0.0, -0.7), vec2(-0.7, -0.7) );\n" 260 " vIndex = vec4(gl_VertexIndex);\n" 261 " gl_PointSize = 
1.0;\n" 262 " gl_Position = vec4(pos[gl_VertexIndex], 0.0, 1.0);\n" 263 "}"; 264 265 // set = 0, location = 0 -> buffer ProtectedTestBuffer (uvec4) 266 // set = 0, location = 2 -> buffer ProtectedTestBufferSource (uvec4) 267 const char* readShaderTemplateStr = 268 "#version 450\n" 269 "${INPUT_DECLARATION}\n" 270 "\n" 271 "layout(set=0, binding=0, std140) buffer ProtectedTestBuffer\n" 272 "{\n" 273 " highp uvec4 protectedTestResultBuffer;\n" 274 "};\n" 275 "\n" 276 "layout(set=0, binding=2, std140) buffer ProtectedTestBufferSource\n" 277 "{\n" 278 " highp uvec4 protectedTestBufferSource;\n" 279 "};\n" 280 "\n" 281 "void main (void)\n" 282 "{\n" 283 " protectedTestResultBuffer = protectedTestBufferSource;\n" 284 " ${FRAGMENT_OUTPUT}\n" 285 "}\n"; 286 287 // set = 0, location = 0 -> buffer ProtectedTestBuffer (uvec4) 288 // set = 0, location = 1 -> uniform Data (uvec4) 289 const char* writeShaderTemplateStr = 290 "#version 450\n" 291 "${INPUT_DECLARATION}\n" 292 "\n" 293 "layout(set=0, binding=0, std140) buffer ProtectedTestBuffer\n" 294 "{\n" 295 " highp uvec4 protectedTestResultBuffer;\n" 296 "};\n" 297 "\n" 298 "layout(set=0, binding=1, std140) uniform Data\n" 299 "{\n" 300 " highp uvec4 testInput;\n" 301 "};\n" 302 "\n" 303 "void main (void)\n" 304 "{\n" 305 " protectedTestResultBuffer = testInput;\n" 306 " ${FRAGMENT_OUTPUT}\n" 307 "}\n"; 308 309 // set = 0, location = 0 -> buffer ProtectedTestBuffer (uint [4]) 310 const char* atomicTestShaderTemplateStr = 311 "#version 450\n" 312 "${INPUT_DECLARATION}\n" 313 "\n" 314 "layout(set=0, binding=0, std430) buffer ProtectedTestBuffer\n" 315 "{\n" 316 " highp uint protectedTestResultBuffer[4];\n" 317 "};\n" 318 "\n" 319 "void main (void)\n" 320 "{\n" 321 " uint i = uint(${INVOCATION_ID});\n" 322 " ${ATOMIC_FUNCTION_CALL}\n" 323 " ${FRAGMENT_OUTPUT}\n" 324 "}\n"; 325 326 const char* shaderTemplateStr; 327 std::map<std::string, std::string> shaderParam; 328 switch (m_testType) { 329 case SSBO_READ: shaderTemplateStr = 
readShaderTemplateStr; break; 330 case SSBO_WRITE: shaderTemplateStr = writeShaderTemplateStr; break; 331 case SSBO_ATOMIC: { 332 shaderTemplateStr = atomicTestShaderTemplateStr; 333 shaderParam["ATOMIC_FUNCTION_CALL"] = m_extraShader; 334 break; 335 } 336 default: DE_FATAL("Incorrect SSBO test type"); return; 337 } 338 339 if (m_shaderType == glu::SHADERTYPE_FRAGMENT) 340 { 341 shaderParam["INPUT_DECLARATION"] = "layout(location=0) out mediump vec4 o_color;\n" 342 "layout(location=0) in vec4 vIndex;\n"; 343 shaderParam["FRAGMENT_OUTPUT"] = "o_color = vec4( 0.0, 0.4, 1.0, 1.0 );\n"; 344 shaderParam["INVOCATION_ID"] = "vIndex.x"; 345 346 programCollection.glslSources.add("vert") << glu::VertexSource(vertexShader); 347 programCollection.glslSources.add("TestShader") << glu::FragmentSource(tcu::StringTemplate(shaderTemplateStr).specialize(shaderParam)); 348 } 349 else if (m_shaderType == glu::SHADERTYPE_COMPUTE) 350 { 351 shaderParam["INPUT_DECLARATION"] = "layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"; 352 shaderParam["FRAGMENT_OUTPUT"] = ""; 353 shaderParam["INVOCATION_ID"] = "gl_GlobalInvocationID.x"; 354 programCollection.glslSources.add("TestShader") << glu::ComputeSource(tcu::StringTemplate(shaderTemplateStr).specialize(shaderParam)); 355 } 356 else 357 DE_FATAL("Incorrect shader type"); 358 359 m_validator.initPrograms(programCollection); 360 } 361 362 template<typename T> 363 tcu::TestStatus StorageBufferTestInstance<T>::executeFragmentTest(void) 364 { 365 ProtectedContext& ctx (m_protectedContext); 366 const vk::DeviceInterface& vk = ctx.getDeviceInterface(); 367 const vk::VkDevice device = ctx.getDevice(); 368 const vk::VkQueue queue = ctx.getQueue(); 369 const deUint32 queueFamilyIndex = ctx.getQueueFamilyIndex(); 370 371 const deUint32 testUniformSize = sizeof(m_testInput); 372 de::UniquePtr<vk::BufferWithMemory> testUniform (makeBuffer(ctx, 373 PROTECTION_DISABLED, 374 queueFamilyIndex, 375 testUniformSize, 376 
vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT 377 | vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, 378 vk::MemoryRequirement::HostVisible)); 379 380 // Set the test input uniform data 381 { 382 deMemcpy(testUniform->getAllocation().getHostPtr(), &m_testInput, testUniformSize); 383 vk::flushMappedMemoryRange(vk, device, testUniform->getAllocation().getMemory(), testUniform->getAllocation().getOffset(), testUniformSize); 384 } 385 const deUint32 testBufferSize = sizeof(ValidationDataStorage<T>); 386 de::MovePtr<vk::BufferWithMemory> testBuffer (makeBuffer(ctx, 387 PROTECTION_ENABLED, 388 queueFamilyIndex, 389 testBufferSize, 390 vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT 391 | vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, 392 vk::MemoryRequirement::Protected)); 393 de::MovePtr<vk::BufferWithMemory> testBufferSource (makeBuffer(ctx, 394 PROTECTION_ENABLED, 395 queueFamilyIndex, 396 testBufferSize, 397 vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT 398 | vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, 399 vk::MemoryRequirement::Protected)); 400 401 vk::Move<vk::VkShaderModule> vertexShader (vk::createShaderModule(vk, device, ctx.getBinaryCollection().get("vert"), 0)); 402 vk::Unique<vk::VkShaderModule> testShader (vk::createShaderModule(vk, device, ctx.getBinaryCollection().get("TestShader"), 0)); 403 404 // Create descriptors 405 vk::Unique<vk::VkDescriptorSetLayout> descriptorSetLayout(vk::DescriptorSetLayoutBuilder() 406 .addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, vk::VK_SHADER_STAGE_ALL) 407 .addSingleBinding(vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, vk::VK_SHADER_STAGE_ALL) 408 .addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, vk::VK_SHADER_STAGE_ALL) 409 .build(vk, device)); 410 vk::Unique<vk::VkDescriptorPool> descriptorPool(vk::DescriptorPoolBuilder() 411 .addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u) 412 .addType(vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u) 413 .addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u) 414 .build(vk, device, 
vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u)); 415 vk::Unique<vk::VkDescriptorSet> descriptorSet (makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout)); 416 417 // Update descriptor set information 418 { 419 vk::VkDescriptorBufferInfo descTestBuffer = makeDescriptorBufferInfo(**testBuffer, 0, testBufferSize); 420 vk::VkDescriptorBufferInfo descTestUniform = makeDescriptorBufferInfo(**testUniform, 0, testUniformSize); 421 vk::VkDescriptorBufferInfo descTestBufferSource = makeDescriptorBufferInfo(**testBufferSource, 0, testBufferSize); 422 423 vk::DescriptorSetUpdateBuilder() 424 .writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descTestBuffer) 425 .writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descTestUniform) 426 .writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(2u), vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descTestBufferSource) 427 .update(vk, device); 428 } 429 430 // Create output image 431 de::MovePtr<vk::ImageWithMemory> colorImage (createImage2D(ctx, PROTECTION_ENABLED, queueFamilyIndex, 432 RENDER_WIDTH, RENDER_HEIGHT, 433 m_imageFormat, 434 vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT|vk::VK_IMAGE_USAGE_SAMPLED_BIT)); 435 vk::Unique<vk::VkImageView> colorImageView (createImageView(ctx, **colorImage, m_imageFormat)); 436 vk::Unique<vk::VkRenderPass> renderPass (createRenderPass(ctx, m_imageFormat)); 437 vk::Unique<vk::VkFramebuffer> framebuffer (createFramebuffer(ctx, RENDER_WIDTH, RENDER_HEIGHT, *renderPass, *colorImageView)); 438 439 // Build pipeline 440 vk::Unique<vk::VkPipelineLayout> pipelineLayout (makePipelineLayout(vk, device, *descriptorSetLayout)); 441 vk::Unique<vk::VkCommandPool> cmdPool (makeCommandPool(vk, device, PROTECTION_ENABLED, queueFamilyIndex)); 442 vk::Unique<vk::VkCommandBuffer> cmdBuffer (vk::allocateCommandBuffer(vk, device, 
*cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY)); 443 444 // Create pipeline 445 vk::Unique<vk::VkPipeline> graphicsPipeline (makeGraphicsPipeline(vk, device, *pipelineLayout, *renderPass, 446 *vertexShader, *testShader, 447 std::vector<vk::VkVertexInputBindingDescription>(), 448 std::vector<vk::VkVertexInputAttributeDescription>(), 449 tcu::UVec2(RENDER_WIDTH, RENDER_HEIGHT), 450 vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST)); 451 452 beginCommandBuffer(vk, *cmdBuffer); 453 454 if (m_testType == SSBO_READ || m_testType == SSBO_ATOMIC) 455 { 456 vk::VkBuffer targetBuffer = (m_testType == SSBO_ATOMIC) ? **testBuffer : **testBufferSource; 457 addBufferCopyCmd(vk, *cmdBuffer, queueFamilyIndex, **testUniform, targetBuffer, testUniformSize); 458 } 459 460 // Start image barrier 461 { 462 const vk::VkImageMemoryBarrier startImgBarrier = 463 { 464 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType 465 DE_NULL, // pNext 466 0, // srcAccessMask 467 vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // dstAccessMask 468 vk::VK_IMAGE_LAYOUT_UNDEFINED, // oldLayout 469 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // newLayout 470 queueFamilyIndex, // srcQueueFamilyIndex 471 queueFamilyIndex, // dstQueueFamilyIndex 472 **colorImage, // image 473 { 474 vk::VK_IMAGE_ASPECT_COLOR_BIT, // aspectMask 475 0u, // baseMipLevel 476 1u, // mipLevels 477 0u, // baseArraySlice 478 1u, // subresourceRange 479 } 480 }; 481 482 vk.cmdPipelineBarrier(*cmdBuffer, 483 vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 484 vk::VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, 485 (vk::VkDependencyFlags)0, 486 0, (const vk::VkMemoryBarrier*)DE_NULL, 487 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 488 1, &startImgBarrier); 489 } 490 491 const vk::VkClearValue clearValue = vk::makeClearValueColorF32(0.125f, 0.25f, 0.5f, 1.0f); 492 const vk::VkRenderPassBeginInfo renderPassBeginInfo = 493 { 494 vk::VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, // VkStructureType sType; 495 DE_NULL, // const void* pNext; 496 *renderPass, // VkRenderPass 
renderPass; 497 *framebuffer, // VkFramebuffer framebuffer; 498 { { 0, 0 }, { RENDER_WIDTH, RENDER_HEIGHT } }, // VkRect2D renderArea; 499 1u, // deUint32 attachmentCount; 500 &clearValue // const VkClearValue* pAttachmentClearValues; 501 }; 502 503 vk.cmdBeginRenderPass(*cmdBuffer, &renderPassBeginInfo, vk::VK_SUBPASS_CONTENTS_INLINE); 504 vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *graphicsPipeline); 505 vk.cmdBindDescriptorSets(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1u, &*descriptorSet, 0u, DE_NULL); 506 507 vk.cmdDraw(*cmdBuffer, 4u, 1u, 0u, 0u); 508 vk.cmdEndRenderPass(*cmdBuffer); 509 510 { 511 const vk::VkImageMemoryBarrier endImgBarrier = 512 { 513 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType 514 DE_NULL, // pNext 515 vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // srcAccessMask 516 vk::VK_ACCESS_SHADER_READ_BIT, // dstAccessMask 517 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // oldLayout 518 vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, // newLayout 519 queueFamilyIndex, // srcQueueFamilyIndex 520 queueFamilyIndex, // dstQueueFamilyIndex 521 **colorImage, // image 522 { 523 vk::VK_IMAGE_ASPECT_COLOR_BIT, // aspectMask 524 0u, // baseMipLevel 525 1u, // mipLevels 526 0u, // baseArraySlice 527 1u, // subresourceRange 528 } 529 }; 530 vk.cmdPipelineBarrier(*cmdBuffer, 531 vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 532 vk::VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, 533 (vk::VkDependencyFlags)0, 534 0, (const vk::VkMemoryBarrier*)DE_NULL, 535 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 536 1, &endImgBarrier); 537 } 538 539 VK_CHECK(vk.endCommandBuffer(*cmdBuffer)); 540 541 // Execute Draw 542 { 543 const vk::Unique<vk::VkFence> fence (vk::createFence(vk, device)); 544 VK_CHECK(vk.resetFences(device, 1, &fence.get())); 545 VK_CHECK(queueSubmit(ctx, PROTECTION_ENABLED, queue, *cmdBuffer, *fence, ~0ull)); 546 } 547 548 // Log inputs 549 ctx.getTestContext().getLog() 550 << tcu::TestLog::Message << "Input 
values: \n" 551 << "1: " << m_testInput << "\n" 552 << tcu::TestLog::EndMessage; 553 554 // Validate buffer 555 if (m_validator.validateBuffer(ctx, **testBuffer)) 556 return tcu::TestStatus::pass("Everything went OK"); 557 else 558 return tcu::TestStatus::fail("Something went really wrong"); 559 } 560 561 template<typename T> 562 tcu::TestStatus StorageBufferTestInstance<T>::executeComputeTest(void) 563 { 564 ProtectedContext& ctx (m_protectedContext); 565 const vk::DeviceInterface& vk = ctx.getDeviceInterface(); 566 const vk::VkDevice device = ctx.getDevice(); 567 const vk::VkQueue queue = ctx.getQueue(); 568 const deUint32 queueFamilyIndex = ctx.getQueueFamilyIndex(); 569 570 const deUint32 testUniformSize = sizeof(m_testInput); 571 de::UniquePtr<vk::BufferWithMemory> testUniform (makeBuffer(ctx, 572 PROTECTION_DISABLED, 573 queueFamilyIndex, 574 testUniformSize, 575 vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT 576 | vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, 577 vk::MemoryRequirement::HostVisible)); 578 579 // Set the test input uniform data 580 { 581 deMemcpy(testUniform->getAllocation().getHostPtr(), &m_testInput, testUniformSize); 582 vk::flushMappedMemoryRange(vk, device, testUniform->getAllocation().getMemory(), testUniform->getAllocation().getOffset(), testUniformSize); 583 } 584 585 const deUint32 testBufferSize = sizeof(ValidationDataStorage<T>); 586 de::MovePtr<vk::BufferWithMemory> testBuffer (makeBuffer(ctx, 587 PROTECTION_ENABLED, 588 queueFamilyIndex, 589 testBufferSize, 590 vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT 591 | vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, 592 vk::MemoryRequirement::Protected)); 593 de::MovePtr<vk::BufferWithMemory> testBufferSource (makeBuffer(ctx, 594 PROTECTION_ENABLED, 595 queueFamilyIndex, 596 testBufferSize, 597 vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT 598 | vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, 599 vk::MemoryRequirement::Protected)); 600 601 vk::Unique<vk::VkShaderModule> testShader (vk::createShaderModule(vk, device, 
ctx.getBinaryCollection().get("TestShader"), 0)); 602 603 // Create descriptors 604 vk::Unique<vk::VkDescriptorSetLayout> descriptorSetLayout(vk::DescriptorSetLayoutBuilder() 605 .addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, vk::VK_SHADER_STAGE_COMPUTE_BIT) 606 .addSingleBinding(vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, vk::VK_SHADER_STAGE_COMPUTE_BIT) 607 .addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, vk::VK_SHADER_STAGE_COMPUTE_BIT) 608 .build(vk, device)); 609 vk::Unique<vk::VkDescriptorPool> descriptorPool(vk::DescriptorPoolBuilder() 610 .addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u) 611 .addType(vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u) 612 .addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u) 613 .build(vk, device, vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u)); 614 vk::Unique<vk::VkDescriptorSet> descriptorSet (makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout)); 615 616 // Update descriptor set information 617 { 618 vk::VkDescriptorBufferInfo descTestBuffer = makeDescriptorBufferInfo(**testBuffer, 0, testBufferSize); 619 vk::VkDescriptorBufferInfo descTestUniform = makeDescriptorBufferInfo(**testUniform, 0, testUniformSize); 620 vk::VkDescriptorBufferInfo descTestBufferSource = makeDescriptorBufferInfo(**testBufferSource, 0, testBufferSize); 621 622 vk::DescriptorSetUpdateBuilder() 623 .writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descTestBuffer) 624 .writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descTestUniform) 625 .writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(2u), vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descTestBufferSource) 626 .update(vk, device); 627 } 628 629 630 // Build and execute test 631 { 632 const vk::Unique<vk::VkFence> fence (vk::createFence(vk, device)); 633 vk::Unique<vk::VkPipelineLayout> 
pipelineLayout (makePipelineLayout(vk, device, *descriptorSetLayout)); 634 vk::Unique<vk::VkPipeline> SSBOPipeline (makeComputePipeline(vk, device, *pipelineLayout, *testShader, DE_NULL)); 635 vk::Unique<vk::VkCommandPool> cmdPool (makeCommandPool(vk, device, PROTECTION_ENABLED, queueFamilyIndex)); 636 vk::Unique<vk::VkCommandBuffer> cmdBuffer (vk::allocateCommandBuffer(vk, device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY)); 637 deUint32 dispatchCount = (m_testType == SSBO_ATOMIC) ? 4u : 1u; 638 639 beginCommandBuffer(vk, *cmdBuffer); 640 641 if (m_testType == SSBO_READ || m_testType == SSBO_ATOMIC) 642 { 643 vk::VkBuffer targetBuffer = (m_testType == SSBO_ATOMIC) ? **testBuffer : **testBufferSource; 644 addBufferCopyCmd(vk, *cmdBuffer, queueFamilyIndex, **testUniform, targetBuffer, testUniformSize); 645 } 646 647 vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *SSBOPipeline); 648 vk.cmdBindDescriptorSets(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &*descriptorSet, 0u, DE_NULL); 649 650 vk.cmdDispatch(*cmdBuffer, dispatchCount, 1u, 1u); 651 652 VK_CHECK(vk.endCommandBuffer(*cmdBuffer)); 653 VK_CHECK(queueSubmit(ctx, PROTECTION_ENABLED, queue, *cmdBuffer, *fence, ~0ull)); 654 } 655 656 ctx.getTestContext().getLog() 657 << tcu::TestLog::Message << "Input values: \n" 658 << "1: " << m_testInput << "\n" 659 << tcu::TestLog::EndMessage; 660 661 // Validate buffer 662 if (m_validator.validateBuffer(ctx, **testBuffer)) 663 return tcu::TestStatus::pass("Everything went OK"); 664 else 665 return tcu::TestStatus::fail("Something went really wrong"); 666 } 667 668 template<typename T> 669 tcu::TestStatus StorageBufferTestInstance<T>::iterate(void) 670 { 671 switch (m_shaderType) 672 { 673 case glu::SHADERTYPE_FRAGMENT: return executeFragmentTest(); 674 case glu::SHADERTYPE_COMPUTE: return executeComputeTest(); 675 default: 676 DE_FATAL("Incorrect shader type"); return tcu::TestStatus::fail(""); 677 } 678 } 679 680 
// Creates one test group containing a StorageBufferTestCase per entry of
// testData, named "<type>_1" .. "<type>_N". The group is named groupName and
// the same UVec4 is used as both shader input and expected validation value.
tcu::TestCaseGroup* createSpecifiedStorageBufferTests (tcu::TestContext&                            testCtx,
                                                       const std::string                            groupName,
                                                       SSBOTestType                                 testType,
                                                       const glu::ShaderType                        shaderType,
                                                       const ValidationDataStorage<tcu::UVec4>      testData[],
                                                       size_t                                       testCount)
{
    const std::string               testTypeStr = getSSBOTypeString(testType);
    const std::string               description = "Storage buffer " + testTypeStr + " tests";
    de::MovePtr<tcu::TestCaseGroup> testGroup   (new tcu::TestCaseGroup(testCtx, groupName.c_str(), description.c_str()));

    for (size_t ndx = 0; ndx < testCount; ++ndx)
    {
        // Case names are 1-based: "<type>_1", "<type>_2", ...
        const std::string name = testTypeStr + "_" + de::toString(ndx + 1);
        testGroup->addChild(new StorageBufferTestCase<tcu::UVec4>(testCtx, testType, shaderType, name.c_str(), testData[ndx].values, testData[ndx]));
    }

    return testGroup.release();
}

// Builds a "random" group with RANDOM_TEST_COUNT cases whose input vectors are
// drawn from a de::Random seeded with the command-line base seed.
// NOTE: the sequence of rnd.getUint32() calls defines the generated cases;
// changing the call order changes the test content.
tcu::TestCaseGroup* createRandomizedBufferTests (tcu::TestContext& testCtx, SSBOTestType testType, const glu::ShaderType shaderType, size_t testCount)
{
    de::Random                                      rnd         (testCtx.getCommandLine().getBaseSeed());
    std::vector<ValidationDataStorage<tcu::UVec4> > testData;
    testData.resize(testCount);

    for (size_t ndx = 0; ndx < testCount; ++ndx)
        testData[ndx].values = tcu::UVec4(rnd.getUint32(), rnd.getUint32(), rnd.getUint32(), rnd.getUint32());

    return createSpecifiedStorageBufferTests(testCtx, "random", testType, shaderType, testData.data(), testData.size());
}

// Builds the read/write top-level group: for each supported shader stage
// (fragment, compute) a sub-group with "static" (fixed testData) and "random"
// (seed-generated) children.
tcu::TestCaseGroup* createRWStorageBufferTests (tcu::TestContext&                        testCtx,
                                                const std::string                        groupName,
                                                const std::string                        groupDescription,
                                                SSBOTestType                             testType,
                                                const ValidationDataStorage<tcu::UVec4>  testData[],
                                                size_t                                   testCount)
{
    de::MovePtr<tcu::TestCaseGroup> ssboRWTestGroup (new tcu::TestCaseGroup(testCtx, groupName.c_str(), groupDescription.c_str()));

    glu::ShaderType shaderTypes[] = {
        glu::SHADERTYPE_FRAGMENT,
        glu::SHADERTYPE_COMPUTE
    };

    for (int shaderNdx = 0; shaderNdx < DE_LENGTH_OF_ARRAY(shaderTypes); ++shaderNdx)
    {
        const glu::ShaderType           shaderType      = shaderTypes[shaderNdx];
        const std::string               shaderName      = getShaderTypeString(shaderType);
        const std::string               shaderGroupDesc = "Storage buffer tests for shader type: " + shaderName;
        de::MovePtr<tcu::TestCaseGroup> testShaderGroup (new tcu::TestCaseGroup(testCtx, shaderName.c_str(), shaderGroupDesc.c_str()));

        testShaderGroup->addChild(createSpecifiedStorageBufferTests(testCtx, "static", testType, shaderType, testData, testCount));
        testShaderGroup->addChild(createRandomizedBufferTests(testCtx, testType, shaderType, RANDOM_TEST_COUNT));
        ssboRWTestGroup->addChild(testShaderGroup.release());
    }

    return ssboRWTestGroup.release();
}

// Produces, for the given atomic operation:
//  - atomicCall: the GLSL statement spliced into the atomic shader template
//    (operates on protectedTestResultBuffer[i] with literal arguments), and
//  - refValue:   the expected per-component result after the operation has
//    been applied to every component of inputValue.
// swapNdx selects (mod 4) which component's value is used as the compare
// argument of atomicCompSwap, so exactly that component swaps to atomicArg.
void calculateAtomicOpData (SSBOAtomicType      type,
                            const tcu::UVec4&   inputValue,
                            const deUint32      atomicArg,
                            std::string&        atomicCall,
                            tcu::UVec4&         refValue,
                            const deUint32      swapNdx = 0)
{
    switch (type)
    {
        case ATOMIC_ADD:
        {
            refValue   = inputValue + tcu::UVec4(atomicArg);
            atomicCall = "atomicAdd(protectedTestResultBuffer[i], " + de::toString(atomicArg) + "u);";
            break;
        }
        case ATOMIC_MIN:
        {
            refValue   = tcu::UVec4(std::min(inputValue.x(), atomicArg), std::min(inputValue.y(), atomicArg), std::min(inputValue.z(), atomicArg), std::min(inputValue.w(), atomicArg));
            atomicCall = "atomicMin(protectedTestResultBuffer[i], " + de::toString(atomicArg) + "u);";
            break;
        }
        case ATOMIC_MAX:
        {
            refValue   = tcu::UVec4(std::max(inputValue.x(), atomicArg), std::max(inputValue.y(), atomicArg), std::max(inputValue.z(), atomicArg), std::max(inputValue.w(), atomicArg));
            atomicCall = "atomicMax(protectedTestResultBuffer[i], " + de::toString(atomicArg) + "u);";
            break;
        }
        case ATOMIC_AND:
        {
            refValue   = tcu::UVec4(inputValue.x() & atomicArg, inputValue.y() & atomicArg, inputValue.z() & atomicArg, inputValue.w() & atomicArg);
            atomicCall = "atomicAnd(protectedTestResultBuffer[i], " + de::toString(atomicArg) + "u);";
            break;
        }
        case ATOMIC_OR:
        {
            refValue   = tcu::UVec4(inputValue.x() | atomicArg, inputValue.y() | atomicArg, inputValue.z() | atomicArg, inputValue.w() | atomicArg);
            atomicCall = "atomicOr(protectedTestResultBuffer[i], " + de::toString(atomicArg) + "u);";
            break;
        }
        case ATOMIC_XOR:
        {
            refValue   = tcu::UVec4(inputValue.x() ^ atomicArg, inputValue.y() ^ atomicArg, inputValue.z() ^ atomicArg, inputValue.w() ^ atomicArg);
            atomicCall = "atomicXor(protectedTestResultBuffer[i], " + de::toString(atomicArg) + "u);";
            break;
        }
        case ATOMIC_EXCHANGE:
        {
            refValue   = tcu::UVec4(atomicArg);
            atomicCall = "atomicExchange(protectedTestResultBuffer[i], " + de::toString(atomicArg) + "u);";
            break;
        }
        case ATOMIC_COMPSWAP:
        {
            // Compare against the value of one selected component; only that
            // component's comparison succeeds, so only it is replaced.
            int         selectedNdx    = swapNdx % 4;
            deUint32    selectedChange = inputValue[selectedNdx];

            refValue                = inputValue;
            refValue[selectedNdx]   = atomicArg;
            atomicCall = "atomicCompSwap(protectedTestResultBuffer[i], " + de::toString(selectedChange) + "u, " + de::toString(atomicArg) + "u);";
            break;
        }
        default: DE_FATAL("Incorrect atomic function type"); break;
    }
}

} // anonymous

// Entry point: read-path test groups ("ssbo_read") with fixed unit-vector inputs.
tcu::TestCaseGroup* createReadStorageBufferTests (tcu::TestContext& testCtx)
{
    const ValidationDataStorage<tcu::UVec4> testData[] = {
        { tcu::UVec4(0u, 0u, 0u, 0u) }, { tcu::UVec4(1u, 0u, 0u, 0u) },
        { tcu::UVec4(0u, 1u, 0u, 0u) }, { tcu::UVec4(0u, 0u, 1u, 0u) },
        { tcu::UVec4(0u, 0u, 0u, 1u) }, { tcu::UVec4(1u, 1u, 1u, 1u) }
    };

    return createRWStorageBufferTests(testCtx, "ssbo_read", "Storage Buffer Read Tests", SSBO_READ, testData, DE_LENGTH_OF_ARRAY(testData));
}

// Entry point: write-path test groups ("ssbo_write") with fixed unit-vector inputs.
tcu::TestCaseGroup* createWriteStorageBufferTests (tcu::TestContext& testCtx)
{
    const ValidationDataStorage<tcu::UVec4> testData[] = {
        { tcu::UVec4(0u, 0u, 0u, 0u) }, { tcu::UVec4(1u, 0u, 0u, 0u) },
        { tcu::UVec4(0u, 1u, 0u, 0u) }, { tcu::UVec4(0u, 0u, 1u, 0u) },
        { tcu::UVec4(0u, 0u, 0u, 1u) }, { tcu::UVec4(1u, 1u, 1u, 1u) }
    };

    return createRWStorageBufferTests(testCtx, "ssbo_write", "Storage Buffer Write Tests", SSBO_WRITE, testData, DE_LENGTH_OF_ARRAY(testData));
}

// Entry point: atomic-path test groups ("ssbo_atomic"). Layout:
//   ssbo_atomic / <shader> / <op> / {static, random} / atomic_<op>_<n>
// Static cases use the fixed testData table; random cases draw inputs and the
// atomic argument from a de::Random seeded with the command-line base seed.
// NOTE: the rnd.getUint16() call order defines the random cases; do not reorder.
tcu::TestCaseGroup* createAtomicStorageBufferTests (tcu::TestContext& testctx)
{
    struct {
        const tcu::UVec4    input;      // initial value of each result-buffer component
        const deUint32      atomicArg;  // literal operand of the atomic call
        const deUint32      swapNdx;    // component index used by atomicCompSwap only
    } testData[] = {
        { tcu::UVec4(0u, 1u, 2u, 3u),                   10u,    0u },
        { tcu::UVec4(10u, 20u, 30u, 40u),               3u,     2u },
        { tcu::UVec4(800u, 400u, 230u, 999u),           50u,    3u },
        { tcu::UVec4(100800u, 233400u, 22230u, 77999u), 800u,   1u },
    };

    SSBOAtomicType testTypes[] = {
        ATOMIC_ADD,
        ATOMIC_MIN,
        ATOMIC_MAX,
        ATOMIC_AND,
        ATOMIC_OR,
        ATOMIC_XOR,
        ATOMIC_EXCHANGE,
        ATOMIC_COMPSWAP
    };

    glu::ShaderType shaderTypes[] = {
        glu::SHADERTYPE_FRAGMENT,
        glu::SHADERTYPE_COMPUTE
    };

    de::Random                      rnd             (testctx.getCommandLine().getBaseSeed());
    de::MovePtr<tcu::TestCaseGroup> ssboAtomicTests (new tcu::TestCaseGroup(testctx, "ssbo_atomic", "Storage Buffer Atomic Tests"));

    for (int shaderNdx = 0; shaderNdx < DE_LENGTH_OF_ARRAY(shaderTypes); ++shaderNdx)
    {
        const glu::ShaderType           shaderType          = shaderTypes[shaderNdx];
        const std::string               shaderName          = getShaderTypeString(shaderType);
        const std::string               shaderDesc          = "Storage Buffer Atomic Tests for shader type: " + shaderName;
        de::MovePtr<tcu::TestCaseGroup> atomicShaderGroup   (new tcu::TestCaseGroup(testctx, shaderName.c_str(), shaderDesc.c_str()));

        for (int typeNdx = 0; typeNdx < DE_LENGTH_OF_ARRAY(testTypes); ++typeNdx)
        {
            SSBOAtomicType      atomicType      = testTypes[typeNdx];
            const std::string   atomicTypeStr   = getSSBOAtomicTypeString(atomicType);
            const std::string   atomicDesc      = "Storage Buffer Atomic Tests: " + atomicTypeStr;

            // Fixed-input cases.
            de::MovePtr<tcu::TestCaseGroup> staticTests (new tcu::TestCaseGroup(testctx, "static", (atomicDesc + " with static input").c_str()));
            for (int ndx = 0; ndx < DE_LENGTH_OF_ARRAY(testData); ++ndx)
            {
                const std::string   name        = "atomic_" + atomicTypeStr + "_" + de::toString(ndx + 1);
                const tcu::UVec4&   inputValue  = testData[ndx].input;
                const deUint32&     atomicArg   = testData[ndx].atomicArg;
                std::string         atomicCall;
                tcu::UVec4          refValue;

                calculateAtomicOpData(atomicType, inputValue, atomicArg, atomicCall, refValue, testData[ndx].swapNdx);

                ValidationDataStorage<tcu::UVec4> validationData = { refValue };
                staticTests->addChild(new StorageBufferTestCase<tcu::UVec4>(testctx, SSBO_ATOMIC, shaderType, name.c_str(), inputValue, validationData, atomicCall));
            }

            // Seed-generated cases; 16-bit values keep atomicAdd results well
            // below uint overflow.
            de::MovePtr<tcu::TestCaseGroup> randomTests (new tcu::TestCaseGroup(testctx, "random", (atomicDesc + " with random input").c_str()));
            for (int ndx = 0; ndx < RANDOM_TEST_COUNT; ndx++)
            {
                const std::string   name        = "atomic_" + atomicTypeStr + "_" + de::toString(ndx + 1);
                deUint32            atomicArg   = rnd.getUint16();
                tcu::UVec4          inputValue;
                tcu::UVec4          refValue;
                std::string         atomicCall;

                for (int i = 0; i < 4; i++)
                    inputValue[i] = rnd.getUint16();

                calculateAtomicOpData(atomicType, inputValue, atomicArg, atomicCall, refValue, ndx);

                ValidationDataStorage<tcu::UVec4> validationData = { refValue };
                randomTests->addChild(new StorageBufferTestCase<tcu::UVec4>(testctx, SSBO_ATOMIC, shaderType, name.c_str(), inputValue, validationData, atomicCall));
            }

            de::MovePtr<tcu::TestCaseGroup> atomicTests (new tcu::TestCaseGroup(testctx, atomicTypeStr.c_str(), atomicDesc.c_str()));
            atomicTests->addChild(staticTests.release());
            atomicTests->addChild(randomTests.release());
            atomicShaderGroup->addChild(atomicTests.release());
        }
        ssboAtomicTests->addChild(atomicShaderGroup.release());
    }

    return ssboAtomicTests.release();
}

} // ProtectedMem
} // vkt