/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2016 The Khronos Group Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Synchronization internally synchronized objects tests
 *//*--------------------------------------------------------------------*/

#include "vktSynchronizationInternallySynchronizedObjectsTests.hpp"
#include "vktTestCaseUtil.hpp"
#include "vktSynchronizationUtil.hpp"

#include "vkRef.hpp"
#include "tcuDefs.hpp"
#include "vkTypeUtil.hpp"
#include "vkPlatform.hpp"
#include "vkBuilderUtil.hpp"
#include "vkImageUtil.hpp"

#include "tcuResultCollector.hpp"

#include "deThread.hpp"
#include "deMutex.hpp"
#include "deSharedPtr.hpp"

#include <limits>
#include <iterator>

namespace vkt
{
namespace synchronization
{
namespace
{
using namespace vk;

using std::vector;
using std::string;
using std::map;
using std::exception;
using std::ostringstream;

using tcu::TestStatus;
using tcu::TestContext;
using tcu::ResultCollector;
using tcu::TestException;

using de::UniquePtr;
using de::MovePtr;
using de::SharedPtr;
using de::Mutex;
using de::Thread;
using de::clamp;

enum
{
    EXECUTION_PER_THREAD = 100,
    BUFFER_ELEMENT_COUNT = 16,
    BUFFER_SIZE          = BUFFER_ELEMENT_COUNT * 4
};

// Tracks every queue of the created logical device and whether it is
// currently in use; getFreeQueue()/releaseQueue() are guarded by a mutex so
// worker threads can share the queues safely.
class MultiQueues
{
    typedef struct QueueType
    {
        vector<VkQueue> queues;
        vector<bool>    available;
    } Queues;

public:
    inline void addQueueFamilyIndex (const deUint32& queueFamilyIndex, const deUint32& count)
    {
        Queues temp;
        temp.available.assign(count, false);
        temp.queues.resize(count);
        m_queues[queueFamilyIndex] = temp;
    }

    const deUint32& getQueueFamilyIndex (const int index)
    {
        map<deUint32,Queues>::iterator it = m_queues.begin();
        advance(it, index);
        return it->first;
    }

    inline size_t countQueueFamilyIndex (void)
    {
        return m_queues.size();
    }

    Queues& getQueues (const int index)
    {
        map<deUint32,Queues>::iterator it = m_queues.begin();
        advance(it, index);
        return it->second;
    }

    bool getFreeQueue (deUint32& returnQueueFamilyIndex, VkQueue& returnQueues, int& returnQueueIndex)
    {
        for (int queueFamilyIndexNdx = 0; queueFamilyIndexNdx < static_cast<int>(m_queues.size()); ++queueFamilyIndexNdx)
        {
            Queues& queue = m_queues[getQueueFamilyIndex(queueFamilyIndexNdx)];
            for (int queueNdx = 0; queueNdx < static_cast<int>(queue.queues.size()); ++queueNdx)
            {
                m_mutex.lock();
                if (queue.available[queueNdx])
                {
                    queue.available[queueNdx] = false;
                    returnQueueFamilyIndex    = getQueueFamilyIndex(queueFamilyIndexNdx);
                    returnQueues              = queue.queues[queueNdx];
                    returnQueueIndex          = queueNdx;
                    m_mutex.unlock();
                    return true;
                }
                m_mutex.unlock();
            }
        }
        return false;
    }

    void releaseQueue (const deUint32& queueFamilyIndex, const int& queueIndex)
    {
        m_mutex.lock();
        m_queues[queueFamilyIndex].available[queueIndex] = true;
        m_mutex.unlock();
    }

    inline void setDevice (Move<VkDevice> device)
    {
        m_logicalDevice = device;
    }

    inline VkDevice getDevice (void)
    {
        return *m_logicalDevice;
    }

    MovePtr<Allocator>   m_allocator;

protected:
    Move<VkDevice>       m_logicalDevice;
    map<deUint32,Queues> m_queues;
    Mutex                m_mutex;
};

MovePtr<Allocator> createAllocator (const Context& context, const VkDevice& device)
{
    const DeviceInterface&                 deviceInterface        = context.getDeviceInterface();
    const InstanceInterface&               instance               = context.getInstanceInterface();
    const VkPhysicalDevice                 physicalDevice         = context.getPhysicalDevice();
    const VkPhysicalDeviceMemoryProperties deviceMemoryProperties = getPhysicalDeviceMemoryProperties(instance, physicalDevice);

    // Create memory allocator for device
    return MovePtr<Allocator>(new SimpleAllocator(deviceInterface, device, deviceMemoryProperties));
}

bool checkQueueFlags (const VkQueueFlags& availableFlag, const VkQueueFlags& neededFlag)
{
    // Transfer operations are implicitly supported by graphics and compute queues
    if (VK_QUEUE_TRANSFER_BIT == neededFlag)
    {
        if ((availableFlag & VK_QUEUE_GRAPHICS_BIT) == VK_QUEUE_GRAPHICS_BIT ||
            (availableFlag & VK_QUEUE_COMPUTE_BIT)  == VK_QUEUE_COMPUTE_BIT  ||
            (availableFlag & VK_QUEUE_TRANSFER_BIT) == VK_QUEUE_TRANSFER_BIT)
            return true;
    }
    else if ((availableFlag & neededFlag) == neededFlag)
    {
        return true;
    }
    return false;
}

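// Creates a logical device exposing every queue family that matches the
// requested flags, retrieves all of its queues up front and stores them,
// together with the device and an allocator, in a MultiQueues object.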
MovePtr<MultiQueues> createQueues (const Context& context, const VkQueueFlags& queueFlag)
{
    const DeviceInterface&          vk             = context.getDeviceInterface();
    const InstanceInterface&        instance       = context.getInstanceInterface();
    const VkPhysicalDevice          physicalDevice = context.getPhysicalDevice();
    MovePtr<MultiQueues>            moveQueues     (new MultiQueues());
    MultiQueues&                    queues         = *moveQueues;
    VkDeviceCreateInfo              deviceInfo;
    VkPhysicalDeviceFeatures        deviceFeatures;
    vector<VkQueueFamilyProperties> queueFamilyProperties;
    vector<float>                   queuePriorities;
    vector<VkDeviceQueueCreateInfo> queueInfos;

    queueFamilyProperties = getPhysicalDeviceQueueFamilyProperties(instance, physicalDevice);

    for (deUint32 queuePropertiesNdx = 0; queuePropertiesNdx < queueFamilyProperties.size(); ++queuePropertiesNdx)
    {
        if (checkQueueFlags(queueFamilyProperties[queuePropertiesNdx].queueFlags, queueFlag))
        {
            queues.addQueueFamilyIndex(queuePropertiesNdx, queueFamilyProperties[queuePropertiesNdx].queueCount);
        }
    }

    if (queues.countQueueFamilyIndex() == 0)
    {
        TCU_THROW(NotSupportedError, "Queue not found");
    }

    // One shared priority array, sized for the largest family
    {
        unsigned int maxQueueCount = 0;
        for (int queueFamilyIndexNdx = 0; queueFamilyIndexNdx < static_cast<int>(queues.countQueueFamilyIndex()); ++queueFamilyIndexNdx)
        {
            if (queues.getQueues(queueFamilyIndexNdx).queues.size() > maxQueueCount)
                maxQueueCount = static_cast<unsigned int>(queues.getQueues(queueFamilyIndexNdx).queues.size());
        }
        queuePriorities.assign(maxQueueCount, 1.0f);
    }

    for (int queueFamilyIndexNdx = 0; queueFamilyIndexNdx < static_cast<int>(queues.countQueueFamilyIndex()); ++queueFamilyIndexNdx)
    {
        VkDeviceQueueCreateInfo queueInfo;
        const deUint32          queueCount = static_cast<deUint32>(queues.getQueues(queueFamilyIndexNdx).queues.size());

        deMemset(&queueInfo, 0, sizeof(queueInfo));

        queueInfo.sType            = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
        queueInfo.pNext            = DE_NULL;
        queueInfo.flags            = (VkDeviceQueueCreateFlags)0u;
        queueInfo.queueFamilyIndex = queues.getQueueFamilyIndex(queueFamilyIndexNdx);
        queueInfo.queueCount       = queueCount;
        queueInfo.pQueuePriorities = &queuePriorities[0];

        queueInfos.push_back(queueInfo);
    }

    deMemset(&deviceInfo, 0, sizeof(deviceInfo));
    instance.getPhysicalDeviceFeatures(physicalDevice, &deviceFeatures);

    deviceInfo.sType                   = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    deviceInfo.pNext                   = DE_NULL;
    deviceInfo.enabledExtensionCount   = 0u;
    deviceInfo.ppEnabledExtensionNames = DE_NULL;
    deviceInfo.enabledLayerCount       = 0u;
    deviceInfo.ppEnabledLayerNames     = DE_NULL;
    deviceInfo.pEnabledFeatures        = &deviceFeatures;
    deviceInfo.queueCreateInfoCount    = static_cast<deUint32>(queues.countQueueFamilyIndex());
    deviceInfo.pQueueCreateInfos       = &queueInfos[0];

    queues.setDevice(createDevice(instance, physicalDevice, &deviceInfo));

    for (int queueFamilyIndex = 0; queueFamilyIndex < static_cast<int>(queues.countQueueFamilyIndex()); ++queueFamilyIndex)
    {
        for (deUint32 queueReqNdx = 0; queueReqNdx < queues.getQueues(queueFamilyIndex).queues.size(); ++queueReqNdx)
        {
            vk.getDeviceQueue(queues.getDevice(), queues.getQueueFamilyIndex(queueFamilyIndex), queueReqNdx, &queues.getQueues(queueFamilyIndex).queues[queueReqNdx]);
            queues.getQueues(queueFamilyIndex).available[queueReqNdx] = true;
        }
    }

    queues.m_allocator = createAllocator(context, queues.getDevice());
    return moveQueues;
}

Move<VkRenderPass> createRenderPass (const Context& context, const VkDevice& device, const VkFormat& colorFormat)
{
    const DeviceInterface&        vk                         = context.getDeviceInterface();
    const VkAttachmentDescription colorAttachmentDescription =
    {
        0u,                                        // VkAttachmentDescriptionFlags flags;
        colorFormat,                               // VkFormat                     format;
        VK_SAMPLE_COUNT_1_BIT,                     // VkSampleCountFlagBits        samples;
        VK_ATTACHMENT_LOAD_OP_CLEAR,               // VkAttachmentLoadOp           loadOp;
        VK_ATTACHMENT_STORE_OP_STORE,              // VkAttachmentStoreOp          storeOp;
        VK_ATTACHMENT_LOAD_OP_DONT_CARE,           // VkAttachmentLoadOp           stencilLoadOp;
        VK_ATTACHMENT_STORE_OP_DONT_CARE,          // VkAttachmentStoreOp          stencilStoreOp;
        VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,  // VkImageLayout                initialLayout;
        VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,  // VkImageLayout                finalLayout;
    };
    const VkAttachmentReference   colorAttachmentReference   =
    {
        0u,                                        // deUint32      attachment;
        VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL   // VkImageLayout layout;
    };
    const VkSubpassDescription    subpassDescription         =
    {
        0u,                                        // VkSubpassDescriptionFlags    flags;
        VK_PIPELINE_BIND_POINT_GRAPHICS,           // VkPipelineBindPoint          pipelineBindPoint;
        0u,                                        // deUint32                     inputAttachmentCount;
        DE_NULL,                                   // const VkAttachmentReference* pInputAttachments;
        1u,                                        // deUint32                     colorAttachmentCount;
        &colorAttachmentReference,                 // const VkAttachmentReference* pColorAttachments;
        DE_NULL,                                   // const VkAttachmentReference* pResolveAttachments;
        DE_NULL,                                   // const VkAttachmentReference* pDepthStencilAttachment;
        0u,                                        // deUint32                     preserveAttachmentCount;
        DE_NULL                                    // const VkAttachmentReference* pPreserveAttachments;
    };
    const VkRenderPassCreateInfo  renderPassParams           =
    {
        VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType                 sType;
        DE_NULL,                                   // const void*                     pNext;
        0u,                                        // VkRenderPassCreateFlags         flags;
        1u,                                        // deUint32                        attachmentCount;
        &colorAttachmentDescription,               // const VkAttachmentDescription*  pAttachments;
        1u,                                        // deUint32                        subpassCount;
        &subpassDescription,                       // const VkSubpassDescription*     pSubpasses;
        0u,                                        // deUint32                        dependencyCount;
        DE_NULL                                    // const VkSubpassDependency*      pDependencies;
    };
    return createRenderPass(vk, device, &renderPassParams);
}

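// Grabs a free queue (spinning until another thread releases one), runs the
// given compute pipeline once and checks that the shader wrote the sequence
// 0..BUFFER_ELEMENT_COUNT-1 into the result buffer.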
TestStatus executeComputePipeline (const Context& context, const VkPipeline& pipeline, const VkPipelineLayout& pipelineLayout,
                                   const VkDescriptorSetLayout& descriptorSetLayout, MultiQueues& queues, const deUint32& shadersExecutions)
{
    const DeviceInterface& vk     = context.getDeviceInterface();
    const VkDevice         device = queues.getDevice();
    deUint32               queueFamilyIndex;
    VkQueue                queue;
    int                    queueIndex;

    // Busy-wait until some queue becomes available
    while (!queues.getFreeQueue(queueFamilyIndex, queue, queueIndex)) {}

    {
        const Unique<VkDescriptorPool> descriptorPool (DescriptorPoolBuilder()
                                                           .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
                                                           .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
        Buffer                        resultBuffer  (vk, device, *queues.m_allocator, makeBufferCreateInfo(BUFFER_SIZE, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT), MemoryRequirement::HostVisible);
        const VkBufferMemoryBarrier   bufferBarrier = makeBufferMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, *resultBuffer, 0ull, BUFFER_SIZE);
        const Unique<VkCommandPool>   cmdPool       (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
        const Unique<VkCommandBuffer> cmdBuffer     (makeCommandBuffer(vk, device, *cmdPool));

        // Zero the result buffer
        {
            const Allocation& alloc = resultBuffer.getAllocation();
            deMemset(alloc.getHostPtr(), 0, BUFFER_SIZE);
            flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), BUFFER_SIZE);
        }

        // Start recording commands
        beginCommandBuffer(vk, *cmdBuffer);

        vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);

        // Create descriptor set
        const Unique<VkDescriptorSet> descriptorSet        (makeDescriptorSet(vk, device, *descriptorPool, descriptorSetLayout));
        const VkDescriptorBufferInfo  resultDescriptorInfo = makeDescriptorBufferInfo(*resultBuffer, 0ull, BUFFER_SIZE);

        DescriptorSetUpdateBuilder()
            .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &resultDescriptorInfo)
            .update(vk, device);

        vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL);

        // Dispatch compute command
        vk.cmdDispatch(*cmdBuffer, shadersExecutions, 1u, 1u);

        // Make the shader writes visible to the host
        vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0,
                              0, (const VkMemoryBarrier*)DE_NULL,
                              1, &bufferBarrier,
                              0, (const VkImageMemoryBarrier*)DE_NULL);

        // End recording commands
        endCommandBuffer(vk, *cmdBuffer);

        // Wait for command buffer execution finish
        submitCommandsAndWait(vk, device, queue, *cmdBuffer);
        queues.releaseQueue(queueFamilyIndex, queueIndex);

        {
            const Allocation& resultAlloc = resultBuffer.getAllocation();
            invalidateMappedMemoryRange(vk, device, resultAlloc.getMemory(), resultAlloc.getOffset(), BUFFER_SIZE);

            const deInt32* ptr = reinterpret_cast<deInt32*>(resultAlloc.getHostPtr());
            for (deInt32 ndx = 0; ndx < BUFFER_ELEMENT_COUNT; ++ndx)
            {
                if (ptr[ndx] != ndx)
                {
                    return TestStatus::fail("Data did not match");
                }
            }
        }
        return TestStatus::pass("Passed");
    }
}

// Graphics counterpart of executeComputePipeline(): renders shadersExecutions
// points so the vertex shader fills the result buffer, then verifies it.
TestStatus executeGraphicPipeline (const Context& context, const VkPipeline& pipeline, const VkPipelineLayout& pipelineLayout,
                                   const VkDescriptorSetLayout& descriptorSetLayout, MultiQueues& queues, const VkRenderPass& renderPass, const deUint32 shadersExecutions)
{
    const DeviceInterface& vk     = context.getDeviceInterface();
    const VkDevice         device = queues.getDevice();
    deUint32               queueFamilyIndex;
    VkQueue                queue;
    int                    queueIndex;

    // Busy-wait until some queue becomes available
    while (!queues.getFreeQueue(queueFamilyIndex, queue, queueIndex)) {}

    {
        const Unique<VkDescriptorPool>  descriptorPool             (DescriptorPoolBuilder()
                                                                        .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
                                                                        .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
        Move<VkDescriptorSet>           descriptorSet              = makeDescriptorSet(vk, device, *descriptorPool, descriptorSetLayout);
        Buffer                          resultBuffer               (vk, device, *queues.m_allocator, makeBufferCreateInfo(BUFFER_SIZE, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT), MemoryRequirement::HostVisible);
        const VkBufferMemoryBarrier     bufferBarrier              = makeBufferMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, *resultBuffer, 0ull, BUFFER_SIZE);
        const VkFormat                  colorFormat                = VK_FORMAT_R8G8B8A8_UNORM;
        const VkExtent3D                colorImageExtent           = makeExtent3D(1u, 1u, 1u);
        const VkImageSubresourceRange   colorImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
        de::MovePtr<Image>              colorAttachmentImage       = de::MovePtr<Image>(new Image(vk, device, *queues.m_allocator,
                                                                         makeImageCreateInfo(VK_IMAGE_TYPE_2D, colorImageExtent, colorFormat, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT),
                                                                         MemoryRequirement::Any));
        Move<VkImageView>               colorAttachmentView        = makeImageView(vk, device, **colorAttachmentImage, VK_IMAGE_VIEW_TYPE_2D, colorFormat, colorImageSubresourceRange);
        Move<VkFramebuffer>             framebuffer                = makeFramebuffer(vk, device, renderPass, *colorAttachmentView, colorImageExtent.width, colorImageExtent.height, 1u);
        const Unique<VkCommandPool>     cmdPool                    (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
        const Unique<VkCommandBuffer>   cmdBuffer                  (makeCommandBuffer(vk, device, *cmdPool));
        const VkDescriptorBufferInfo    outputBufferDescriptorInfo = makeDescriptorBufferInfo(*resultBuffer, 0ull, BUFFER_SIZE);

        DescriptorSetUpdateBuilder()
            .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &outputBufferDescriptorInfo)
            .update(vk, device);

        // Zero the result buffer
        {
            const Allocation& alloc = resultBuffer.getAllocation();
            deMemset(alloc.getHostPtr(), 0, BUFFER_SIZE);
            flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), BUFFER_SIZE);
        }

        // Start recording commands
        beginCommandBuffer(vk, *cmdBuffer);

        // Change color attachment image layout
        {
            const VkImageMemoryBarrier colorAttachmentLayoutBarrier = makeImageMemoryBarrier(
                (VkAccessFlags)0, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                **colorAttachmentImage, colorImageSubresourceRange);

            // COLOR_ATTACHMENT_WRITE access is only valid with the color attachment
            // output stage, so the transition must target that stage
            vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, (VkDependencyFlags)0,
                                  0u, DE_NULL, 0u, DE_NULL, 1u, &colorAttachmentLayoutBarrier);
        }

        {
            const VkRect2D renderArea =
            {
                makeOffset2D(0, 0),
                makeExtent2D(1, 1),
            };
            const tcu::Vec4 clearColor = tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f);
            beginRenderPass(vk, *cmdBuffer, renderPass, *framebuffer, renderArea, clearColor);
        }

        vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
        vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL);

        vk.cmdDraw(*cmdBuffer, shadersExecutions, 1u, 0u, 0u);
        endRenderPass(vk, *cmdBuffer);

        // Make the vertex shader writes visible to the host
        vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0,
                              0, (const VkMemoryBarrier*)DE_NULL,
                              1, &bufferBarrier,
                              0, (const VkImageMemoryBarrier*)DE_NULL);

        // End recording commands
        endCommandBuffer(vk, *cmdBuffer);

        // Wait for command buffer execution finish
        submitCommandsAndWait(vk, device, queue, *cmdBuffer);
        queues.releaseQueue(queueFamilyIndex, queueIndex);

        {
            const Allocation& resultAlloc = resultBuffer.getAllocation();
            invalidateMappedMemoryRange(vk, device, resultAlloc.getMemory(), resultAlloc.getOffset(), BUFFER_SIZE);

            const deInt32* ptr = reinterpret_cast<deInt32*>(resultAlloc.getHostPtr());
            for (deInt32 ndx = 0; ndx < BUFFER_ELEMENT_COUNT; ++ndx)
            {
                if (ptr[ndx] != ndx)
                {
                    return TestStatus::fail("Data did not match");
                }
            }
        }
        return TestStatus::pass("Passed");
    }
}

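// Worker thread base class: runThread() executes on its own de::Thread and
// its TestStatus (or any exception it throws) is recorded in a per-thread
// ResultCollector, which ThreadGroup::run() merges into the final verdict.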
class ThreadGroupThread : private Thread
{
public:
    ThreadGroupThread (const Context& context, VkPipelineCache pipelineCache, const VkPipelineLayout& pipelineLayout,
                       const VkDescriptorSetLayout& descriptorSetLayout, MultiQueues& queues, const vector<deUint32>& shadersExecutions)
        : m_context             (context)
        , m_pipelineCache       (pipelineCache)
        , m_pipelineLayout      (pipelineLayout)
        , m_descriptorSetLayout (descriptorSetLayout)
        , m_queues              (queues)
        , m_shadersExecutions   (shadersExecutions)
    {
    }

    virtual ~ThreadGroupThread (void)
    {
    }

    ResultCollector& getResultCollector (void)
    {
        return m_resultCollector;
    }

    using Thread::start;
    using Thread::join;

protected:
    virtual TestStatus runThread () = 0;

    const Context&               m_context;
    VkPipelineCache              m_pipelineCache;
    const VkPipelineLayout&      m_pipelineLayout;
    const VkDescriptorSetLayout& m_descriptorSetLayout;
    MultiQueues&                 m_queues;
    const vector<deUint32>&      m_shadersExecutions;

private:
    ThreadGroupThread            (const ThreadGroupThread&);
    ThreadGroupThread& operator= (const ThreadGroupThread&);

    void run (void)
    {
        try
        {
            TestStatus result = runThread();
            m_resultCollector.addResult(result.getCode(), result.getDescription());
        }
        catch (const TestException& e)
        {
            m_resultCollector.addResult(e.getTestResult(), e.getMessage());
        }
        catch (const exception& e)
        {
            m_resultCollector.addResult(QP_TEST_RESULT_FAIL, e.what());
        }
        catch (...)
        {
            m_resultCollector.addResult(QP_TEST_RESULT_FAIL, "Unhandled exception");
        }
    }

    ResultCollector m_resultCollector;
};

class ThreadGroup
{
    typedef vector<SharedPtr<ThreadGroupThread> > ThreadVector;
public:
    ThreadGroup (void)
    {
    }
    ~ThreadGroup (void)
    {
    }

    void add (MovePtr<ThreadGroupThread> thread)
    {
        m_threads.push_back(SharedPtr<ThreadGroupThread>(thread.release()));
    }

    TestStatus run (void)
    {
        ResultCollector resultCollector;

        for (ThreadVector::iterator threadIter = m_threads.begin(); threadIter != m_threads.end(); ++threadIter)
            (*threadIter)->start();

        for (ThreadVector::iterator threadIter = m_threads.begin(); threadIter != m_threads.end(); ++threadIter)
        {
            ResultCollector& threadResult = (*threadIter)->getResultCollector();
            (*threadIter)->join();
            resultCollector.addResult(threadResult.getResult(), threadResult.getMessage());
        }

        return TestStatus(resultCollector.getResult(), resultCollector.getMessage());
    }

private:
    ThreadVector m_threads;
};

// Each worker creates EXECUTION_PER_THREAD compute pipelines from the shared
// pipeline cache, cycling through the shader variants, and runs every
// pipeline it creates.
class CreateComputeThread : public ThreadGroupThread
{
public:
    CreateComputeThread (const Context& context, VkPipelineCache pipelineCache, vector<VkComputePipelineCreateInfo>& pipelineInfo,
                         const VkPipelineLayout& pipelineLayout, const VkDescriptorSetLayout& descriptorSetLayout,
                         MultiQueues& queues, const vector<deUint32>& shadersExecutions)
        : ThreadGroupThread (context, pipelineCache, pipelineLayout, descriptorSetLayout, queues, shadersExecutions)
        , m_pipelineInfo    (pipelineInfo)
    {
    }

    TestStatus runThread (void)
    {
        ResultCollector resultCollector;
        for (int executionNdx = 0; executionNdx < EXECUTION_PER_THREAD; ++executionNdx)
        {
            const int              shaderNdx = executionNdx % static_cast<int>(m_pipelineInfo.size());
            const DeviceInterface& vk        = m_context.getDeviceInterface();
            const VkDevice         device    = m_queues.getDevice();
            Move<VkPipeline>       pipeline  = createComputePipeline(vk, device, m_pipelineCache, &m_pipelineInfo[shaderNdx]);

            TestStatus result = executeComputePipeline(m_context, *pipeline, m_pipelineLayout, m_descriptorSetLayout, m_queues, m_shadersExecutions[shaderNdx]);
            resultCollector.addResult(result.getCode(), result.getDescription());
        }
        return TestStatus(resultCollector.getResult(), resultCollector.getMessage());
    }
private:
    vector<VkComputePipelineCreateInfo>& m_pipelineInfo;
};

// Graphics counterpart of CreateComputeThread
class CreateGraphicThread : public ThreadGroupThread
{
public:
    CreateGraphicThread (const Context& context, VkPipelineCache pipelineCache, vector<VkGraphicsPipelineCreateInfo>& pipelineInfo,
                         const VkPipelineLayout& pipelineLayout, const VkDescriptorSetLayout& descriptorSetLayout,
                         MultiQueues& queues, const VkRenderPass& renderPass, const vector<deUint32>& shadersExecutions)
        : ThreadGroupThread (context, pipelineCache, pipelineLayout, descriptorSetLayout, queues, shadersExecutions)
        , m_pipelineInfo    (pipelineInfo)
        , m_renderPass      (renderPass)
    {}

    TestStatus runThread (void)
    {
        ResultCollector resultCollector;
        for (int executionNdx = 0; executionNdx < EXECUTION_PER_THREAD; ++executionNdx)
        {
            const int              shaderNdx = executionNdx % static_cast<int>(m_pipelineInfo.size());
            const DeviceInterface& vk        = m_context.getDeviceInterface();
            const VkDevice         device    = m_queues.getDevice();
            Move<VkPipeline>       pipeline  = createGraphicsPipeline(vk, device, m_pipelineCache, &m_pipelineInfo[shaderNdx]);

            TestStatus result = executeGraphicPipeline(m_context, *pipeline, m_pipelineLayout, m_descriptorSetLayout, m_queues, m_renderPass, m_shadersExecutions[shaderNdx]);
            resultCollector.addResult(result.getCode(), result.getDescription());
        }
        return TestStatus(resultCollector.getResult(), resultCollector.getMessage());
    }

private:
    vector<VkGraphicsPipelineCreateInfo>& m_pipelineInfo;
    const VkRenderPass&                   m_renderPass;
};

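// Compute test instance: a single VkPipelineCache is shared, without any
// external synchronization, by numThreads worker threads that concurrently
// create and execute pipelines through it.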
class PipelineCacheComputeTestInstance : public TestInstance
{
    typedef vector<SharedPtr<Unique<VkShaderModule> > > ShaderModuleVector;
public:
    PipelineCacheComputeTestInstance (Context& context, const vector<deUint32>& shadersExecutions)
        : TestInstance        (context)
        , m_shadersExecutions (shadersExecutions)
    {
    }

    TestStatus iterate (void)
    {
        const DeviceInterface&                  vk                  = m_context.getDeviceInterface();
        MovePtr<MultiQueues>                    queues              = createQueues(m_context, VK_QUEUE_COMPUTE_BIT);
        const VkDevice                          device              = queues->getDevice();
        ShaderModuleVector                      shaderCompModules   = addShaderModules(device);
        const Move<VkDescriptorSetLayout>       descriptorSetLayout (DescriptorSetLayoutBuilder()
                                                                         .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
                                                                         .build(vk, device));
        const Move<VkPipelineLayout>            pipelineLayout      (makePipelineLayout(vk, device, *descriptorSetLayout));
        vector<VkPipelineShaderStageCreateInfo> shaderStageInfos    = addShaderStageInfo(shaderCompModules);
        vector<VkComputePipelineCreateInfo>     pipelineInfo        = addPipelineInfo(*pipelineLayout, shaderStageInfos);
        const VkPipelineCacheCreateInfo         pipelineCacheInfo   =
        {
            VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, // VkStructureType            sType;
            DE_NULL,                                      // const void*                pNext;
            0u,                                           // VkPipelineCacheCreateFlags flags;
            0u,                                           // deUintptr                  initialDataSize;
            DE_NULL,                                      // const void*                pInitialData;
        };
        Move<VkPipelineCache>                   pipelineCache       = createPipelineCache(vk, device, &pipelineCacheInfo);
        Move<VkPipeline>                        pipeline            = createComputePipeline(vk, device, *pipelineCache, &pipelineInfo[0]);
        const deUint32                          numThreads          = clamp(deGetNumAvailableLogicalCores(), 4u, 32u);
        ThreadGroup                             threads;

        // Prime the cache with one pipeline before the threads start
        executeComputePipeline(m_context, *pipeline, *pipelineLayout, *descriptorSetLayout, *queues, m_shadersExecutions[0]);

        for (deUint32 ndx = 0; ndx < numThreads; ++ndx)
            threads.add(MovePtr<ThreadGroupThread>(new CreateComputeThread(
                m_context, *pipelineCache, pipelineInfo, *pipelineLayout, *descriptorSetLayout, *queues, m_shadersExecutions)));

        {
            TestStatus thread_result = threads.run();
            if (thread_result.getCode() != QP_TEST_RESULT_PASS)
            {
                return thread_result;
            }
        }
        return TestStatus::pass("Passed");
    }

private:
    ShaderModuleVector addShaderModules (const VkDevice& device)
    {
        const DeviceInterface& vk = m_context.getDeviceInterface();
        ShaderModuleVector     shaderCompModules;
        shaderCompModules.resize(m_shadersExecutions.size());
        for (int shaderNdx = 0; shaderNdx < static_cast<int>(m_shadersExecutions.size()); ++shaderNdx)
        {
            ostringstream shaderName;
            shaderName << "compute_" << shaderNdx;
            shaderCompModules[shaderNdx] = SharedPtr<Unique<VkShaderModule> >(new Unique<VkShaderModule>(createShaderModule(vk, device, m_context.getBinaryCollection().get(shaderName.str()), (VkShaderModuleCreateFlags)0)));
        }
        return shaderCompModules;
    }

    vector<VkPipelineShaderStageCreateInfo> addShaderStageInfo (const ShaderModuleVector& shaderCompModules)
    {
        VkPipelineShaderStageCreateInfo         shaderStageInfo;
        vector<VkPipelineShaderStageCreateInfo> shaderStageInfos;

        shaderStageInfo.sType               = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
        shaderStageInfo.pNext               = DE_NULL;
        shaderStageInfo.flags               = (VkPipelineShaderStageCreateFlags)0;
        shaderStageInfo.stage               = VK_SHADER_STAGE_COMPUTE_BIT;
        shaderStageInfo.pName               = "main";
        shaderStageInfo.pSpecializationInfo = DE_NULL;

        for (int shaderNdx = 0; shaderNdx < static_cast<int>(m_shadersExecutions.size()); ++shaderNdx)
        {
            shaderStageInfo.module = *(*shaderCompModules[shaderNdx]);
            shaderStageInfos.push_back(shaderStageInfo);
        }
        return shaderStageInfos;
    }

    vector<VkComputePipelineCreateInfo> addPipelineInfo (VkPipelineLayout pipelineLayout, const vector<VkPipelineShaderStageCreateInfo>& shaderStageInfos)
    {
        vector<VkComputePipelineCreateInfo> pipelineInfos;
        VkComputePipelineCreateInfo         computePipelineInfo;

        computePipelineInfo.sType              = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
        computePipelineInfo.pNext              = DE_NULL;
        computePipelineInfo.flags              = (VkPipelineCreateFlags)0;
        computePipelineInfo.layout             = pipelineLayout;
        computePipelineInfo.basePipelineHandle = DE_NULL;
        computePipelineInfo.basePipelineIndex  = 0;

        for (int shaderNdx = 0; shaderNdx < static_cast<int>(m_shadersExecutions.size()); ++shaderNdx)
        {
            computePipelineInfo.stage = shaderStageInfos[shaderNdx];
            pipelineInfos.push_back(computePipelineInfo);
        }
        return pipelineInfos;
    }

    const vector<deUint32> m_shadersExecutions;
};

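// Graphics test instance: same scheme as the compute variant, but the worker
// threads build graphics pipelines whose vertex shaders write the results
// (requires the vertexPipelineStoresAndAtomics feature).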
class PipelineCacheGraphicTestInstance : public TestInstance
{
    typedef vector<SharedPtr<Unique<VkShaderModule> > > ShaderModuleVector;
public:
    PipelineCacheGraphicTestInstance (Context& context, const vector<deUint32>& shadersExecutions)
        : TestInstance        (context)
        , m_shadersExecutions (shadersExecutions)
    {
    }

    TestStatus iterate (void)
    {
        requireFeatures(m_context.getInstanceInterface(), m_context.getPhysicalDevice(), FEATURE_VERTEX_PIPELINE_STORES_AND_ATOMICS);

        const DeviceInterface&                  vk                   = m_context.getDeviceInterface();
        MovePtr<MultiQueues>                    queues               = createQueues(m_context, VK_QUEUE_GRAPHICS_BIT);
        const VkDevice                          device               = queues->getDevice();
        const VkFormat                          colorFormat          = VK_FORMAT_R8G8B8A8_UNORM;
        Move<VkRenderPass>                      renderPass           = createRenderPass(m_context, device, colorFormat);
        const Move<VkDescriptorSetLayout>       descriptorSetLayout  (DescriptorSetLayoutBuilder()
                                                                          .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_VERTEX_BIT)
                                                                          .build(vk, device));
        ShaderModuleVector                      shaderGraphicModules = addShaderModules(device);
        const Move<VkPipelineLayout>            pipelineLayout       (makePipelineLayout(vk, device, *descriptorSetLayout));
        vector<VkPipelineShaderStageCreateInfo> shaderStageInfos     = addShaderStageInfo(shaderGraphicModules);
        vector<VkGraphicsPipelineCreateInfo>    pipelineInfo         = addPipelineInfo(*pipelineLayout, shaderStageInfos, *renderPass);
        const VkPipelineCacheCreateInfo         pipelineCacheInfo    =
        {
            VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, // VkStructureType            sType;
            DE_NULL,                                      // const void*                pNext;
            0u,                                           // VkPipelineCacheCreateFlags flags;
            0u,                                           // deUintptr                  initialDataSize;
            DE_NULL,                                      // const void*                pInitialData;
        };
        Move<VkPipelineCache>                   pipelineCache        = createPipelineCache(vk, device, &pipelineCacheInfo);
        Move<VkPipeline>                        pipeline             = createGraphicsPipeline(vk, device, *pipelineCache, &pipelineInfo[0]);
        const deUint32                          numThreads           = clamp(deGetNumAvailableLogicalCores(), 4u, 32u);
        ThreadGroup                             threads;

        // Prime the cache with one pipeline before the threads start
        executeGraphicPipeline(m_context, *pipeline, *pipelineLayout, *descriptorSetLayout, *queues, *renderPass, m_shadersExecutions[0]);

        for (deUint32 ndx = 0; ndx < numThreads; ++ndx)
            threads.add(MovePtr<ThreadGroupThread>(new CreateGraphicThread(
                m_context, *pipelineCache, pipelineInfo, *pipelineLayout, *descriptorSetLayout, *queues, *renderPass, m_shadersExecutions)));

        {
            TestStatus thread_result = threads.run();
            if (thread_result.getCode() != QP_TEST_RESULT_PASS)
            {
                return thread_result;
            }
        }
        return TestStatus::pass("Passed");
    }

private:
    ShaderModuleVector addShaderModules (const VkDevice& device)
    {
        const DeviceInterface& vk = m_context.getDeviceInterface();
        ShaderModuleVector     shaderModules;

        // One vertex shader per execution count, plus the shared fragment shader
        shaderModules.resize(m_shadersExecutions.size() + 1);
        for (int shaderNdx = 0; shaderNdx < static_cast<int>(m_shadersExecutions.size()); ++shaderNdx)
        {
            ostringstream shaderName;
            shaderName << "vert_" << shaderNdx;
            shaderModules[shaderNdx] = SharedPtr<Unique<VkShaderModule> >(new Unique<VkShaderModule>(createShaderModule(vk, device, m_context.getBinaryCollection().get(shaderName.str()), (VkShaderModuleCreateFlags)0)));
        }
        shaderModules[m_shadersExecutions.size()] = SharedPtr<Unique<VkShaderModule> >(new Unique<VkShaderModule>(createShaderModule(vk, device, m_context.getBinaryCollection().get("frag"), (VkShaderModuleCreateFlags)0)));
        return shaderModules;
    }

    vector<VkPipelineShaderStageCreateInfo> addShaderStageInfo (const ShaderModuleVector& shaderModules)
    {
        VkPipelineShaderStageCreateInfo         shaderStageInfo;
        vector<VkPipelineShaderStageCreateInfo> shaderStageInfos;

        shaderStageInfo.sType               = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
        shaderStageInfo.pNext               = DE_NULL;
        shaderStageInfo.flags               = (VkPipelineShaderStageCreateFlags)0;
        shaderStageInfo.pName               = "main";
        shaderStageInfo.pSpecializationInfo = DE_NULL;

        // Pair each vertex shader with the shared fragment shader
        for (int shaderNdx = 0; shaderNdx < static_cast<int>(m_shadersExecutions.size()); ++shaderNdx)
        {
            shaderStageInfo.stage  = VK_SHADER_STAGE_VERTEX_BIT;
            shaderStageInfo.module = *(*shaderModules[shaderNdx]);
            shaderStageInfos.push_back(shaderStageInfo);

            shaderStageInfo.stage  = VK_SHADER_STAGE_FRAGMENT_BIT;
            shaderStageInfo.module = *(*shaderModules[m_shadersExecutions.size()]);
            shaderStageInfos.push_back(shaderStageInfo);
        }
        return shaderStageInfos;
    }

    vector<VkGraphicsPipelineCreateInfo> addPipelineInfo (VkPipelineLayout pipelineLayout, const vector<VkPipelineShaderStageCreateInfo>& shaderStageInfos, const VkRenderPass& renderPass)
    {
        const VkExtent3D                     colorImageExtent = makeExtent3D(1u, 1u, 1u);
        vector<VkGraphicsPipelineCreateInfo> pipelineInfo;

        m_vertexInputStateParams.sType                           = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
        m_vertexInputStateParams.pNext                           = DE_NULL;
        m_vertexInputStateParams.flags                           = 0u;
        m_vertexInputStateParams.vertexBindingDescriptionCount   = 0u;
        m_vertexInputStateParams.pVertexBindingDescriptions      = DE_NULL;
        m_vertexInputStateParams.vertexAttributeDescriptionCount = 0u;
        m_vertexInputStateParams.pVertexAttributeDescriptions    = DE_NULL;

        m_inputAssemblyStateParams.sType                  = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
        m_inputAssemblyStateParams.pNext                  = DE_NULL;
        m_inputAssemblyStateParams.flags                  = 0u;
        m_inputAssemblyStateParams.topology               = VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
        m_inputAssemblyStateParams.primitiveRestartEnable = VK_FALSE;

        m_viewport.x        = 0.0f;
        m_viewport.y        = 0.0f;
        m_viewport.width    = (float)colorImageExtent.width;
        m_viewport.height   = (float)colorImageExtent.height;
        m_viewport.minDepth = 0.0f;
        m_viewport.maxDepth = 1.0f;

        m_scissor.offset.x      = 0;
        m_scissor.offset.y      = 0;
        m_scissor.extent.width  = colorImageExtent.width;
        m_scissor.extent.height = colorImageExtent.height;

        m_viewportStateParams.sType         = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
        m_viewportStateParams.pNext         = DE_NULL;
        m_viewportStateParams.flags         = 0u;
        m_viewportStateParams.viewportCount = 1u;
        m_viewportStateParams.pViewports    = &m_viewport;
        m_viewportStateParams.scissorCount  = 1u;
        m_viewportStateParams.pScissors     = &m_scissor;

        m_rasterStateParams.sType                   = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
        m_rasterStateParams.pNext                   = DE_NULL;
        m_rasterStateParams.flags                   = 0u;
        m_rasterStateParams.depthClampEnable        = VK_FALSE;
        m_rasterStateParams.rasterizerDiscardEnable = VK_FALSE;
        m_rasterStateParams.polygonMode             = VK_POLYGON_MODE_FILL;
        m_rasterStateParams.cullMode                = VK_CULL_MODE_NONE;
        m_rasterStateParams.frontFace               = VK_FRONT_FACE_COUNTER_CLOCKWISE;
        m_rasterStateParams.depthBiasEnable         = VK_FALSE;
        m_rasterStateParams.depthBiasConstantFactor = 0.0f;
        m_rasterStateParams.depthBiasClamp          = 0.0f;
        m_rasterStateParams.depthBiasSlopeFactor    = 0.0f;
        m_rasterStateParams.lineWidth               = 1.0f;

        m_colorBlendAttachmentState.blendEnable         = VK_FALSE;
        m_colorBlendAttachmentState.srcColorBlendFactor = VK_BLEND_FACTOR_ONE;
        m_colorBlendAttachmentState.dstColorBlendFactor = VK_BLEND_FACTOR_ZERO;
        m_colorBlendAttachmentState.colorBlendOp        = VK_BLEND_OP_ADD;
        m_colorBlendAttachmentState.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
        m_colorBlendAttachmentState.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
        m_colorBlendAttachmentState.alphaBlendOp        = VK_BLEND_OP_ADD;
        m_colorBlendAttachmentState.colorWriteMask      = VK_COLOR_COMPONENT_R_BIT |
                                                          VK_COLOR_COMPONENT_G_BIT |
                                                          VK_COLOR_COMPONENT_B_BIT |
                                                          VK_COLOR_COMPONENT_A_BIT;

        m_colorBlendStateParams.sType             = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
        m_colorBlendStateParams.pNext             = DE_NULL;
        m_colorBlendStateParams.flags             = 0u;
        m_colorBlendStateParams.logicOpEnable     = VK_FALSE;
        m_colorBlendStateParams.logicOp           = VK_LOGIC_OP_COPY;
        m_colorBlendStateParams.attachmentCount   = 1u;
        m_colorBlendStateParams.pAttachments      = &m_colorBlendAttachmentState;
        m_colorBlendStateParams.blendConstants[0] = 0.0f;
        m_colorBlendStateParams.blendConstants[1] = 0.0f;
        m_colorBlendStateParams.blendConstants[2] = 0.0f;
        m_colorBlendStateParams.blendConstants[3] = 0.0f;

        m_multisampleStateParams.sType                 = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
        m_multisampleStateParams.pNext                 = DE_NULL;
        m_multisampleStateParams.flags                 = 0u;
        m_multisampleStateParams.rasterizationSamples  = VK_SAMPLE_COUNT_1_BIT;
        m_multisampleStateParams.sampleShadingEnable   = VK_FALSE;
        m_multisampleStateParams.minSampleShading      = 0.0f;
        m_multisampleStateParams.pSampleMask           = DE_NULL;
        m_multisampleStateParams.alphaToCoverageEnable = VK_FALSE;
        m_multisampleStateParams.alphaToOneEnable      = VK_FALSE;

        m_depthStencilStateParams.sType                 = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
        m_depthStencilStateParams.pNext                 = DE_NULL;
        m_depthStencilStateParams.flags                 = 0u;
        m_depthStencilStateParams.depthTestEnable       = VK_TRUE;
        m_depthStencilStateParams.depthWriteEnable      = VK_TRUE;
        m_depthStencilStateParams.depthCompareOp        = VK_COMPARE_OP_LESS_OR_EQUAL;
        m_depthStencilStateParams.depthBoundsTestEnable = VK_FALSE;
        m_depthStencilStateParams.stencilTestEnable     = VK_FALSE;
        m_depthStencilStateParams.front.failOp          = VK_STENCIL_OP_KEEP;
        m_depthStencilStateParams.front.passOp          = VK_STENCIL_OP_KEEP;
        m_depthStencilStateParams.front.depthFailOp     = VK_STENCIL_OP_KEEP;
        m_depthStencilStateParams.front.compareOp       = VK_COMPARE_OP_NEVER;
        m_depthStencilStateParams.front.compareMask     = 0u;
        m_depthStencilStateParams.front.writeMask       = 0u;
        m_depthStencilStateParams.front.reference       = 0u;
        m_depthStencilStateParams.back.failOp           = VK_STENCIL_OP_KEEP;
        m_depthStencilStateParams.back.passOp           = VK_STENCIL_OP_KEEP;
        m_depthStencilStateParams.back.depthFailOp      = VK_STENCIL_OP_KEEP;
        m_depthStencilStateParams.back.compareOp        = VK_COMPARE_OP_NEVER;
        m_depthStencilStateParams.back.compareMask      = 0u;
        m_depthStencilStateParams.back.writeMask        = 0u;
        m_depthStencilStateParams.back.reference        = 0u;
        m_depthStencilStateParams.minDepthBounds        = 0.0f;
        m_depthStencilStateParams.maxDepthBounds        = 1.0f;

        VkGraphicsPipelineCreateInfo graphicsPipelineParams =
        {
            VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,  // VkStructureType                               sType;
            DE_NULL,                                          // const void*                                   pNext;
            0u,                                               // VkPipelineCreateFlags                         flags;
            2u,                                               // deUint32                                      stageCount;
            DE_NULL,                                          // const VkPipelineShaderStageCreateInfo*        pStages;
            &m_vertexInputStateParams,                        // const VkPipelineVertexInputStateCreateInfo*   pVertexInputState;
            &m_inputAssemblyStateParams,                      // const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
            DE_NULL,                                          // const VkPipelineTessellationStateCreateInfo*  pTessellationState;
            &m_viewportStateParams,                           // const VkPipelineViewportStateCreateInfo*      pViewportState;
            &m_rasterStateParams,                             // const VkPipelineRasterizationStateCreateInfo* pRasterState;
            &m_multisampleStateParams,                        // const VkPipelineMultisampleStateCreateInfo*   pMultisampleState;
            &m_depthStencilStateParams,                       // const VkPipelineDepthStencilStateCreateInfo*  pDepthStencilState;
            &m_colorBlendStateParams,                         // const VkPipelineColorBlendStateCreateInfo*    pColorBlendState;
            (const VkPipelineDynamicStateCreateInfo*)DE_NULL, // const VkPipelineDynamicStateCreateInfo*       pDynamicState;
            pipelineLayout,                                   // VkPipelineLayout                              layout;
            renderPass,                                       // VkRenderPass                                  renderPass;
            0u,                                               // deUint32                                      subpass;
            DE_NULL,                                          // VkPipeline                                    basePipelineHandle;
            0,                                                // deInt32                                       basePipelineIndex;
        };

        // shaderStageInfos holds (vertex, fragment) pairs; one pipeline per pair
        for (int shaderNdx = 0; shaderNdx < static_cast<int>(m_shadersExecutions.size()) * 2; shaderNdx += 2)
        {
            graphicsPipelineParams.pStages = &shaderStageInfos[shaderNdx];
            pipelineInfo.push_back(graphicsPipelineParams);
        }
        return pipelineInfo;
    }

    const vector<deUint32>                 m_shadersExecutions;
    VkPipelineVertexInputStateCreateInfo   m_vertexInputStateParams;
    VkPipelineInputAssemblyStateCreateInfo m_inputAssemblyStateParams;
    VkViewport                             m_viewport;
    VkRect2D                               m_scissor;
    VkPipelineViewportStateCreateInfo      m_viewportStateParams;
    VkPipelineRasterizationStateCreateInfo m_rasterStateParams;
    VkPipelineColorBlendAttachmentState    m_colorBlendAttachmentState;
    VkPipelineColorBlendStateCreateInfo    m_colorBlendStateParams;
    VkPipelineMultisampleStateCreateInfo   m_multisampleStateParams;
    VkPipelineDepthStencilStateCreateInfo  m_depthStencilStateParams;
};

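// Test case factories. initPrograms() registers shader variants that all
// produce the same buffer contents through different invocation patterns:
// one invocation per element, a single invocation looping over the buffer,
// and (for compute) one full workgroup.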
class PipelineCacheComputeTest : public TestCase
{
public:
    PipelineCacheComputeTest (TestContext&  testCtx,
                              const string& name,
                              const string& description)
        : TestCase (testCtx, name, description)
    {
    }

    void initPrograms (SourceCollections& programCollection) const
    {
        ostringstream buffer;
        buffer << "layout(set = 0, binding = 0, std430) buffer Output\n"
               << "{\n"
               << "    int result[];\n"
               << "} sb_out;\n";

        // One invocation per buffer element
        {
            ostringstream src;
            src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_310_ES) << "\n"
                << "\n"
                << "layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
                << "\n"
                << buffer.str()
                << "void main (void)\n"
                << "{\n"
                << "    highp uint ndx = gl_GlobalInvocationID.x;\n"
                << "    sb_out.result[ndx] = int(ndx);\n"
                << "}\n";
            programCollection.glslSources.add("compute_0") << glu::ComputeSource(src.str());
        }
        // A single invocation fills the whole buffer
        {
            ostringstream src;
            src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_310_ES) << "\n"
                << "\n"
                << "layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
                << "\n"
                << buffer.str()
                << "void main (void)\n"
                << "{\n"
                << "    for (highp uint ndx = 0u; ndx < " << BUFFER_ELEMENT_COUNT << "u; ndx++)\n"
                << "    {\n"
                << "        sb_out.result[ndx] = int(ndx);\n"
                << "    }\n"
                << "}\n";
            programCollection.glslSources.add("compute_1") << glu::ComputeSource(src.str());
        }
        // One workgroup with one invocation per element
        {
            ostringstream src;
            src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_310_ES) << "\n"
                << "\n"
                << "layout(local_size_x = " << BUFFER_ELEMENT_COUNT << ", local_size_y = 1, local_size_z = 1) in;\n"
                << "\n"
                << buffer.str()
                << "void main (void)\n"
                << "{\n"
                << "    highp uint ndx = gl_LocalInvocationID.x;\n"
                << "    sb_out.result[ndx] = int(ndx);\n"
                << "}\n";
            programCollection.glslSources.add("compute_2") << glu::ComputeSource(src.str());
        }
    }

    TestInstance* createInstance (Context& context) const
    {
        vector<deUint32> shadersExecutions;
        shadersExecutions.push_back(16u); // compute_0
        shadersExecutions.push_back(1u);  // compute_1
        shadersExecutions.push_back(1u);  // compute_2
        return new PipelineCacheComputeTestInstance(context, shadersExecutions);
    }
};

class PipelineCacheGraphicTest : public TestCase
{
public:
    PipelineCacheGraphicTest (TestContext&  testCtx,
                              const string& name,
                              const string& description)
        : TestCase (testCtx, name, description)
    {
    }

    void initPrograms (SourceCollections& programCollection) const
    {
        ostringstream buffer;
        buffer << "layout(set = 0, binding = 0, std430) buffer Output\n"
               << "{\n"
               << "    int result[];\n"
               << "} sb_out;\n";

        // Vertex: one vertex per buffer element
        {
            std::ostringstream src;
            src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
                << "\n"
                << buffer.str()
                << "\n"
                << "void main (void)\n"
                << "{\n"
                << "    sb_out.result[gl_VertexIndex] = int(gl_VertexIndex);\n"
                << "    gl_PointSize = 1.0f;\n"
                << "}\n";
            programCollection.glslSources.add("vert_0") << glu::VertexSource(src.str());
        }
        // Vertex: a single vertex fills the whole buffer
        {
            std::ostringstream src;
            src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
                << "\n"
                << buffer.str()
                << "\n"
                << "void main (void)\n"
                << "{\n"
                << "    for (highp uint ndx = 0u; ndx < " << BUFFER_ELEMENT_COUNT << "u; ndx++)\n"
                << "    {\n"
                << "        sb_out.result[ndx] = int(ndx);\n"
                << "    }\n"
                << "    gl_PointSize = 1.0f;\n"
                << "}\n";
            programCollection.glslSources.add("vert_1") << glu::VertexSource(src.str());
        }
        // Vertex: a single vertex fills the buffer in reverse order
        {
            std::ostringstream src;
            src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
                << "\n"
                << buffer.str()
                << "\n"
                << "void main (void)\n"
                << "{\n"
                << "    for (int ndx = " << BUFFER_ELEMENT_COUNT - 1 << "; ndx >= 0; ndx--)\n"
                << "    {\n"
                << "        sb_out.result[uint(ndx)] = ndx;\n"
                << "    }\n"
                << "    gl_PointSize = 1.0f;\n"
                << "}\n";
            programCollection.glslSources.add("vert_2") << glu::VertexSource(src.str());
        }
        // Fragment
        {
            std::ostringstream src;
            src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
                << "\n"
                << "layout(location = 0) out vec4 o_color;\n"
                << "\n"
                << "void main (void)\n"
                << "{\n"
                << "    o_color = vec4(1.0);\n"
                << "}\n";
            programCollection.glslSources.add("frag") << glu::FragmentSource(src.str());
        }
    }

    TestInstance* createInstance (Context& context) const
    {
        vector<deUint32> shadersExecutions;
        shadersExecutions.push_back(16u); // vert_0
        shadersExecutions.push_back(1u);  // vert_1
        shadersExecutions.push_back(1u);  // vert_2
        return new PipelineCacheGraphicTestInstance(context, shadersExecutions);
    }
};

} // anonymous

tcu::TestCaseGroup* createInternallySynchronizedObjects (tcu::TestContext& testCtx)
{
    de::MovePtr<tcu::TestCaseGroup> tests(new tcu::TestCaseGroup(testCtx, "internally_synchronized_objects", "Internally synchronized objects"));
    tests->addChild(new PipelineCacheComputeTest(testCtx, "pipeline_cache_compute", "Tests VkPipelineCache as an internally synchronized object with concurrent compute pipeline creation"));
    tests->addChild(new PipelineCacheGraphicTest(testCtx, "pipeline_cache_graphics", "Tests VkPipelineCache as an internally synchronized object with concurrent graphics pipeline creation"));
    return tests.release();
}

} // synchronization
} // vkt