/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2016 The Khronos Group Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Synchronization primitive tests with multi queue
 *//*--------------------------------------------------------------------*/

#include "vktSynchronizationOperationMultiQueueTests.hpp"
#include "vkDefs.hpp"
#include "vktTestCase.hpp"
#include "vktTestCaseUtil.hpp"
#include "vkRef.hpp"
#include "vkRefUtil.hpp"
#include "vkMemUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkPlatform.hpp"
#include "deUniquePtr.hpp"
#include "tcuTestLog.hpp"
#include "vktSynchronizationUtil.hpp"
#include "vktSynchronizationOperation.hpp"
#include "vktSynchronizationOperationTestData.hpp"
#include "vktSynchronizationOperationResources.hpp"
#include "vktTestGroupUtil.hpp"

namespace vkt
{
namespace synchronization
{
namespace
{
using namespace vk;
using de::MovePtr;
using de::UniquePtr;

// Index into the per-test command pool/buffer arrays: slot 0 is the writing
// queue's objects, slot 1 the reading queue's.
enum QueueType
{
	QUEUETYPE_WRITE,
	QUEUETYPE_READ
};

// A (write queue, read queue) combination to run one write->read synchronization
// iteration on, together with the queue family index of each queue (needed for
// command pool creation and queue family ownership transfer barriers).
struct QueuePair
{
	QueuePair (const deUint32 familyWrite, const deUint32 familyRead, const VkQueue write, const VkQueue read)
		: familyIndexWrite	(familyWrite)
		, familyIndexRead	(familyRead)
		, queueWrite		(write)
		, queueRead			(read)
	{}

	deUint32	familyIndexWrite;
	deUint32	familyIndexRead;
	VkQueue		queueWrite;
	VkQueue		queueRead;
};

// Returns true if a queue advertising availableFlags can serve operations that
// need any of neededFlags. Graphics and compute queues implicitly support
// transfer operations, so VK_QUEUE_TRANSFER_BIT is folded in for them even if
// the implementation did not report it explicitly.
bool checkQueueFlags (VkQueueFlags availableFlags, const VkQueueFlags neededFlags)
{
	if ((availableFlags & (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT)) != 0)
		availableFlags |= VK_QUEUE_TRANSFER_BIT;

	return (availableFlags & neededFlags) != 0;
}

// Creates a dedicated logical device that exposes up to two queues from every
// available queue family, and hands out distinct write/read queue pairs so the
// tests can exercise cross-queue (and cross-family) synchronization.
class MultiQueues
{
	// Per-family bookkeeping: the family's capability flags and the queue
	// handles retrieved from the device (at most two per family).
	struct QueueData
	{
		VkQueueFlags			flags;
		std::vector<VkQueue>	queue;
	};

public:
	MultiQueues (const Context& context)
	{
		const InstanceInterface&					instance				= context.getInstanceInterface();
		const VkPhysicalDevice						physicalDevice			= context.getPhysicalDevice();
		const std::vector<VkQueueFamilyProperties>	queueFamilyProperties	= getPhysicalDeviceQueueFamilyProperties(instance, physicalDevice);

		// Request up to two queues from every family so that same-family
		// pairs with distinct queues are possible.
		for (deUint32 queuePropertiesNdx = 0; queuePropertiesNdx < queueFamilyProperties.size(); ++queuePropertiesNdx)
		{
			addQueueIndex(queuePropertiesNdx,
						  std::min(2u, queueFamilyProperties[queuePropertiesNdx].queueCount),
						  queueFamilyProperties[queuePropertiesNdx].queueFlags);
		}

		std::vector<VkDeviceQueueCreateInfo>	queueInfos;
		const float								queuePriorities[2] = { 1.0f, 1.0f };	//get max 2 queues from one family

		for (std::map<deUint32, QueueData>::iterator it = m_queues.begin(); it != m_queues.end(); ++it)
		{
			const VkDeviceQueueCreateInfo queueInfo =
			{
				VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,		//VkStructureType			sType;
				DE_NULL,										//const void*				pNext;
				(VkDeviceQueueCreateFlags)0u,					//VkDeviceQueueCreateFlags	flags;
				it->first,										//deUint32					queueFamilyIndex;
				static_cast<deUint32>(it->second.queue.size()),	//deUint32					queueCount;
				&queuePriorities[0]								//const float*				pQueuePriorities;
			};
			queueInfos.push_back(queueInfo);
		}

		{
			const VkDeviceCreateInfo deviceInfo =
			{
				VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,			//VkStructureType					sType;
				DE_NULL,										//const void*						pNext;
				0u,												//VkDeviceCreateFlags				flags;
				static_cast<deUint32>(queueInfos.size()),		//deUint32							queueCreateInfoCount;
				&queueInfos[0],									//const VkDeviceQueueCreateInfo*	pQueueCreateInfos;
				0u,												//deUint32							enabledLayerCount;
				DE_NULL,										//const char* const*				ppEnabledLayerNames;
				0u,												//deUint32							enabledExtensionCount;
				DE_NULL,										//const char* const*				ppEnabledExtensionNames;
				&context.getDeviceFeatures()					//const VkPhysicalDeviceFeatures*	pEnabledFeatures;
			};

			m_logicalDevice	= createDevice(instance, physicalDevice, &deviceInfo);
			m_deviceDriver	= MovePtr<DeviceDriver>(new DeviceDriver(instance, *m_logicalDevice));
			m_allocator		= MovePtr<Allocator>(new SimpleAllocator(*m_deviceDriver, *m_logicalDevice, getPhysicalDeviceMemoryProperties(instance, physicalDevice)));

			// Fetch the handles of every queue that was requested above.
			for (std::map<deUint32, QueueData>::iterator it = m_queues.begin(); it != m_queues.end(); ++it)
			for (int queueNdx = 0; queueNdx < static_cast<int>(it->second.queue.size()); ++queueNdx)
				m_deviceDriver->getDeviceQueue(*m_logicalDevice, it->first, queueNdx, &it->second.queue[queueNdx]);
		}
	}

	// Records that 'count' queues with capabilities 'flags' should be created
	// in family 'queueFamilyIndex'; the handles are filled in later by the
	// constructor once the device exists.
	void addQueueIndex (const deUint32 queueFamilyIndex, const deUint32 count, const VkQueueFlags flags)
	{
		QueueData dataToPush;
		dataToPush.flags = flags;
		dataToPush.queue.resize(count);
		m_queues[queueFamilyIndex] = dataToPush;
	}

	// Returns every usable (write queue, read queue) pair where the write queue
	// supports flagsWrite, the read queue supports flagsRead, and the two queue
	// handles are distinct (same family is fine when the family has >1 queue).
	// At most one pair is produced per (write family, read family) combination.
	// Throws NotSupportedError if no pair exists.
	std::vector<QueuePair> getQueuesPairs (const VkQueueFlags flagsWrite, const VkQueueFlags flagsRead)
	{
		std::map<deUint32, QueueData>	queuesWrite;
		std::map<deUint32, QueueData>	queuesRead;
		std::vector<QueuePair>			queuesPairs;

		for (std::map<deUint32, QueueData>::iterator it = m_queues.begin(); it != m_queues.end(); ++it)
		{
			const bool writeQueue	= checkQueueFlags(it->second.flags, flagsWrite);
			const bool readQueue	= checkQueueFlags(it->second.flags, flagsRead);

			if (!(writeQueue || readQueue))
				continue;

			if (writeQueue && readQueue)
			{
				queuesWrite[it->first]	= it->second;
				queuesRead[it->first]	= it->second;
			}
			else if (writeQueue)
				queuesWrite[it->first]	= it->second;
			else if (readQueue)
				queuesRead[it->first]	= it->second;
		}

		for (std::map<deUint32, QueueData>::iterator write = queuesWrite.begin(); write != queuesWrite.end(); ++write)
		for (std::map<deUint32, QueueData>::iterator read  = queuesRead.begin();  read  != queuesRead.end();  ++read)
		{
			const int writeSize	= static_cast<int>(write->second.queue.size());
			const int readSize	= static_cast<int>(read->second.queue.size());

			for (int writeNdx = 0; writeNdx < writeSize; ++writeNdx)
			for (int readNdx  = 0; readNdx  < readSize;  ++readNdx)
			{
				// Only accept pairs of distinct queue handles; the first hit
				// for this family combination wins.
				if (write->second.queue[writeNdx] != read->second.queue[readNdx])
				{
					queuesPairs.push_back(QueuePair(write->first, read->first, write->second.queue[writeNdx], read->second.queue[readNdx]));
					writeNdx = readNdx = std::max(writeSize, readSize);	//exit from the loops
				}
			}
		}

		if (queuesPairs.empty())
			TCU_THROW(NotSupportedError, "Queue not found");

		return queuesPairs;
	}

	VkDevice getDevice (void) const
	{
		return *m_logicalDevice;
	}

	const DeviceInterface& getDeviceInterface (void) const
	{
		return *m_deviceDriver;
	}

	Allocator& getAllocator (void)
	{
		return *m_allocator;
	}

private:
	Move<VkDevice>					m_logicalDevice;
	MovePtr<DeviceDriver>			m_deviceDriver;
	MovePtr<Allocator>				m_allocator;
	std::map<deUint32, QueueData>	m_queues;
};

// Records the pipeline barrier that synchronizes the write with the read on
// 'cmdBuffer'. When the resource is EXCLUSIVE and the two queues belong to
// different families, the barrier also performs a queue family ownership
// transfer: the same barrier must be recorded on both queues (release on the
// write queue, acquire on the read queue — set secondQueue=true for the latter).
// On the acquiring queue the image layout transition and source accesses have
// already been performed by the releasing queue, hence oldLayout/newLayout and
// src/dst access masks are collapsed there. For images outside the
// ownership-transfer case a plain barrier is recorded once, on the first queue
// only; buffers need no barrier at all in that case (the tests synchronize
// execution with a semaphore/fence between the submits).
void createBarrierMultiQueue (const DeviceInterface&	vk,
							  const VkCommandBuffer&	cmdBuffer,
							  const SyncInfo&			writeSync,
							  const SyncInfo&			readSync,
							  const Resource&			resource,
							  const deUint32			writeFamily,
							  const deUint32			readFamily,
							  const VkSharingMode		sharingMode,
							  const bool				secondQueue = false)
{
	if (resource.getType() == RESOURCE_TYPE_IMAGE)
	{
		VkImageMemoryBarrier barrier = makeImageMemoryBarrier(writeSync.accessMask, readSync.accessMask,
			writeSync.imageLayout, readSync.imageLayout, resource.getImage().handle, resource.getImage().subresourceRange);

		if (writeFamily != readFamily && VK_SHARING_MODE_EXCLUSIVE == sharingMode)
		{
			// Queue family ownership transfer: release/acquire barrier pair.
			barrier.srcQueueFamilyIndex	= writeFamily;
			barrier.dstQueueFamilyIndex	= readFamily;
			if (secondQueue)
			{
				// Acquire side: the layout transition happened on release.
				barrier.oldLayout		= barrier.newLayout;
				barrier.srcAccessMask	= barrier.dstAccessMask;
			}
			vk.cmdPipelineBarrier(cmdBuffer, writeSync.stageMask, readSync.stageMask, (VkDependencyFlags)0, 0u, (const VkMemoryBarrier*)DE_NULL, 0u, (const VkBufferMemoryBarrier*)DE_NULL, 1u, &barrier);
		}
		else if (!secondQueue)
			vk.cmdPipelineBarrier(cmdBuffer, writeSync.stageMask, readSync.stageMask, (VkDependencyFlags)0, 0u, (const VkMemoryBarrier*)DE_NULL, 0u, (const VkBufferMemoryBarrier*)DE_NULL, 1u, &barrier);
	}
	else if ((resource.getType() == RESOURCE_TYPE_BUFFER || isIndirectBuffer(resource.getType())) &&
			 writeFamily != readFamily &&
			 VK_SHARING_MODE_EXCLUSIVE == sharingMode)
	{
		const VkBufferMemoryBarrier barrier =
		{
			VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,	// VkStructureType	sType;
			DE_NULL,									// const void*		pNext;
			writeSync.accessMask,						// VkAccessFlags	srcAccessMask;
			readSync.accessMask,						// VkAccessFlags	dstAccessMask;
			writeFamily,								// deUint32			srcQueueFamilyIndex;
			readFamily,									// deUint32			destQueueFamilyIndex;
			resource.getBuffer().handle,				// VkBuffer			buffer;
			resource.getBuffer().offset,				// VkDeviceSize		offset;
			resource.getBuffer().size,					// VkDeviceSize		size;
		};
		vk.cmdPipelineBarrier(cmdBuffer, writeSync.stageMask, readSync.stageMask, (VkDependencyFlags)0, 0u, (const VkMemoryBarrier*)DE_NULL, 1u, (const VkBufferMemoryBarrier*)&barrier, 0u, (const VkImageMemoryBarrier *)DE_NULL);
	}
}

// Common state for the multi-queue test instances: the dedicated multi-queue
// device, an operation context bound to it, and the write/read operation
// factories describing what each iteration records.
class BaseTestInstance : public TestInstance
{
public:
	BaseTestInstance (Context& context, const ResourceDescription& resourceDesc, const OperationSupport& writeOp, const OperationSupport& readOp, PipelineCacheData& pipelineCacheData)
		: TestInstance	(context)
		, m_queues		(new MultiQueues(context))
		, m_opContext	(new OperationContext(context, pipelineCacheData, m_queues->getDeviceInterface(), m_queues->getDevice(), m_queues->getAllocator()))
		, m_resourceDesc(resourceDesc)
		, m_writeOp		(writeOp)
		, m_readOp		(readOp)
	{
	}

protected:
	const UniquePtr<MultiQueues>		m_queues;
	const UniquePtr<OperationContext>	m_opContext;
	const ResourceDescription			m_resourceDesc;
	const OperationSupport&				m_writeOp;
	const OperationSupport&				m_readOp;
};

// Synchronizes the write and read submissions across two queues with a binary
// semaphore: the write submit signals it, the read submit waits on it.
class SemaphoreTestInstance : public BaseTestInstance
{
public:
	SemaphoreTestInstance (Context& context, const ResourceDescription& resourceDesc, const OperationSupport& writeOp, const OperationSupport& readOp, PipelineCacheData& pipelineCacheData, const VkSharingMode sharingMode)
		: BaseTestInstance	(context, resourceDesc, writeOp, readOp, pipelineCacheData)
		, m_sharingMode		(sharingMode)
	{
	}

	tcu::TestStatus iterate (void)
	{
		const DeviceInterface&			vk			= m_opContext->getDeviceInterface();
		const VkDevice					device		= m_opContext->getDevice();
		const std::vector<QueuePair>	queuePairs	= m_queues->getQueuesPairs(m_writeOp.getQueueFlags(*m_opContext), m_readOp.getQueueFlags(*m_opContext));

		// Run the write->read sequence once per usable queue pair; every pair
		// must produce matching data for the test to pass.
		for (deUint32 pairNdx = 0; pairNdx < static_cast<deUint32>(queuePairs.size()); ++pairNdx)
		{

			const UniquePtr<Resource>		resource	(new Resource(*m_opContext, m_resourceDesc, m_writeOp.getResourceUsageFlags() | m_readOp.getResourceUsageFlags()));
			const UniquePtr<Operation>		writeOp		(m_writeOp.build(*m_opContext, *resource));
			const UniquePtr<Operation>		readOp		(m_readOp.build (*m_opContext, *resource));

			const Move<VkCommandPool>		cmdPool[]	=
			{
				createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queuePairs[pairNdx].familyIndexWrite),
				createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queuePairs[pairNdx].familyIndexRead)
			};
			const Move<VkCommandBuffer>		ptrCmdBuffer[]	=
			{
				makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_WRITE]),
				makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_READ])
			};
			const VkCommandBuffer			cmdBuffers[]	=
			{
				*ptrCmdBuffer[QUEUETYPE_WRITE],
				*ptrCmdBuffer[QUEUETYPE_READ]
			};
			const Unique<VkSemaphore>		semaphore		(createSemaphore(vk, device));
			const VkPipelineStageFlags		stageBits[]		= { VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT };
			const VkSubmitInfo				submitInfo[]	=
			{
				{
					VK_STRUCTURE_TYPE_SUBMIT_INFO,			// VkStructureType				sType;
					DE_NULL,								// const void*					pNext;
					0u,										// deUint32						waitSemaphoreCount;
					DE_NULL,								// const VkSemaphore*			pWaitSemaphores;
					(const VkPipelineStageFlags*)DE_NULL,	// const VkPipelineStageFlags*	pWaitDstStageMask;
					1u,										// deUint32						commandBufferCount;
					&cmdBuffers[QUEUETYPE_WRITE],			// const VkCommandBuffer*		pCommandBuffers;
					1u,										// deUint32						signalSemaphoreCount;
					&semaphore.get(),						// const VkSemaphore*			pSignalSemaphores;
				},
				{
					VK_STRUCTURE_TYPE_SUBMIT_INFO,			// VkStructureType				sType;
					DE_NULL,								// const void*					pNext;
					1u,										// deUint32						waitSemaphoreCount;
					&semaphore.get(),						// const VkSemaphore*			pWaitSemaphores;
					stageBits,								// const VkPipelineStageFlags*	pWaitDstStageMask;
					1u,										// deUint32						commandBufferCount;
					&cmdBuffers[QUEUETYPE_READ],			// const VkCommandBuffer*		pCommandBuffers;
					0u,										// deUint32						signalSemaphoreCount;
					DE_NULL,								// const VkSemaphore*			pSignalSemaphores;
				}
			};
			const SyncInfo					writeSync		= writeOp->getSyncInfo();
			const SyncInfo					readSync		= readOp->getSyncInfo();

			// Write queue: record the write, then the release half of the
			// barrier (or the whole barrier when no ownership transfer occurs).
			beginCommandBuffer		(vk, cmdBuffers[QUEUETYPE_WRITE]);
			writeOp->recordCommands	(cmdBuffers[QUEUETYPE_WRITE]);
			createBarrierMultiQueue	(vk, cmdBuffers[QUEUETYPE_WRITE], writeSync, readSync, *resource, queuePairs[pairNdx].familyIndexWrite, queuePairs[pairNdx].familyIndexRead, m_sharingMode);
			endCommandBuffer		(vk, cmdBuffers[QUEUETYPE_WRITE]);

			// Read queue: record the acquire half of the barrier, then the read.
			beginCommandBuffer		(vk, cmdBuffers[QUEUETYPE_READ]);
			createBarrierMultiQueue	(vk, cmdBuffers[QUEUETYPE_READ], writeSync, readSync, *resource, queuePairs[pairNdx].familyIndexWrite, queuePairs[pairNdx].familyIndexRead, m_sharingMode, true);
			readOp->recordCommands	(cmdBuffers[QUEUETYPE_READ]);
			endCommandBuffer		(vk, cmdBuffers[QUEUETYPE_READ]);

			VK_CHECK(vk.queueSubmit(queuePairs[pairNdx].queueWrite, 1u, &submitInfo[QUEUETYPE_WRITE], DE_NULL));
			VK_CHECK(vk.queueSubmit(queuePairs[pairNdx].queueRead, 1u, &submitInfo[QUEUETYPE_READ], DE_NULL));
			VK_CHECK(vk.queueWaitIdle(queuePairs[pairNdx].queueWrite));
			VK_CHECK(vk.queueWaitIdle(queuePairs[pairNdx].queueRead));

			{
				const Data	expected	= writeOp->getData();
				const Data	actual		= readOp->getData();

				if (0 != deMemCmp(expected.data, actual.data, expected.size))
					return tcu::TestStatus::fail("Memory contents don't match");
			}
		}
		return tcu::TestStatus::pass("OK");
	}

private:
	const VkSharingMode	m_sharingMode;
};

// Synchronizes the write and read submissions with a fence: the write submit
// is fully waited on (submitCommandsAndWait) before the read submit starts.
class FenceTestInstance : public BaseTestInstance
{
public:
	FenceTestInstance (Context& context, const ResourceDescription& resourceDesc, const OperationSupport& writeOp, const OperationSupport& readOp, PipelineCacheData& pipelineCacheData, const VkSharingMode sharingMode)
		: BaseTestInstance	(context, resourceDesc, writeOp, readOp, pipelineCacheData)
		, m_sharingMode		(sharingMode)
	{
	}

	tcu::TestStatus iterate (void)
	{
		const DeviceInterface&			vk			= m_opContext->getDeviceInterface();
		const VkDevice					device		= m_opContext->getDevice();
		const std::vector<QueuePair>	queuePairs	= m_queues->getQueuesPairs(m_writeOp.getQueueFlags(*m_opContext), m_readOp.getQueueFlags(*m_opContext));

		for (deUint32 pairNdx = 0; pairNdx < static_cast<deUint32>(queuePairs.size()); ++pairNdx)
		{
			const UniquePtr<Resource>	resource	(new Resource(*m_opContext, m_resourceDesc, m_writeOp.getResourceUsageFlags() | m_readOp.getResourceUsageFlags()));
			const UniquePtr<Operation>	writeOp		(m_writeOp.build(*m_opContext, *resource));
			const UniquePtr<Operation>	readOp		(m_readOp.build(*m_opContext, *resource));
			const Move<VkCommandPool>	cmdPool[]	=
			{
				createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queuePairs[pairNdx].familyIndexWrite),
				createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queuePairs[pairNdx].familyIndexRead)
			};
			const Move<VkCommandBuffer>	ptrCmdBuffer[]	=
			{
				makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_WRITE]),
				makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_READ])
			};
			const VkCommandBuffer		cmdBuffers[]	=
			{
				*ptrCmdBuffer[QUEUETYPE_WRITE],
				*ptrCmdBuffer[QUEUETYPE_READ]
			};
			const SyncInfo				writeSync		= writeOp->getSyncInfo();
			const SyncInfo				readSync		= readOp->getSyncInfo();

			// Record and fully execute the write (including the release half
			// of any ownership transfer) before touching the read queue.
			beginCommandBuffer		(vk, cmdBuffers[QUEUETYPE_WRITE]);
			writeOp->recordCommands	(cmdBuffers[QUEUETYPE_WRITE]);
			createBarrierMultiQueue	(vk, cmdBuffers[QUEUETYPE_WRITE], writeSync, readSync, *resource, queuePairs[pairNdx].familyIndexWrite, queuePairs[pairNdx].familyIndexRead, m_sharingMode);
			endCommandBuffer		(vk, cmdBuffers[QUEUETYPE_WRITE]);

			submitCommandsAndWait	(vk, device, queuePairs[pairNdx].queueWrite, cmdBuffers[QUEUETYPE_WRITE]);

			// Then record and execute the read (with the acquire half first).
			beginCommandBuffer		(vk, cmdBuffers[QUEUETYPE_READ]);
			createBarrierMultiQueue	(vk, cmdBuffers[QUEUETYPE_READ], writeSync, readSync, *resource, queuePairs[pairNdx].familyIndexWrite, queuePairs[pairNdx].familyIndexRead, m_sharingMode, true);
			readOp->recordCommands	(cmdBuffers[QUEUETYPE_READ]);
			endCommandBuffer		(vk, cmdBuffers[QUEUETYPE_READ]);

			submitCommandsAndWait	(vk, device, queuePairs[pairNdx].queueRead, cmdBuffers[QUEUETYPE_READ]);

			{
				const Data	expected	= writeOp->getData();
				const Data	actual		= readOp->getData();

				if (0 != deMemCmp(expected.data, actual.data, expected.size))
					return tcu::TestStatus::fail("Memory contents don't match");
			}
		}
		return tcu::TestStatus::pass("OK");
	}

private:
	const VkSharingMode	m_sharingMode;
};

// Test case wrapper: selects the fence- or semaphore-based instance for a
// given (write op, read op, resource, sharing mode) combination.
class BaseTestCase : public TestCase
{
public:
	BaseTestCase (tcu::TestContext&			testCtx,
				  const std::string&		name,
				  const std::string&		description,
				  const SyncPrimitive		syncPrimitive,
				  const ResourceDescription	resourceDesc,
				  const OperationName		writeOp,
				  const OperationName		readOp,
				  const VkSharingMode		sharingMode,
				  PipelineCacheData&		pipelineCacheData)
		: TestCase				(testCtx, name, description)
		, m_resourceDesc		(resourceDesc)
		, m_writeOp				(makeOperationSupport(writeOp, resourceDesc))
		, m_readOp				(makeOperationSupport(readOp, resourceDesc))
		, m_syncPrimitive		(syncPrimitive)
		, m_sharingMode			(sharingMode)
		, m_pipelineCacheData	(pipelineCacheData)
	{
	}

	void initPrograms (SourceCollections& programCollection) const
	{
		m_writeOp->initPrograms(programCollection);
		m_readOp->initPrograms(programCollection);
	}

	TestInstance* createInstance (Context& context) const
	{
		switch (m_syncPrimitive)
		{
			case SYNC_PRIMITIVE_FENCE:
				return new FenceTestInstance(context, m_resourceDesc, *m_writeOp, *m_readOp, m_pipelineCacheData, m_sharingMode);
			case SYNC_PRIMITIVE_SEMAPHORE:
				return new SemaphoreTestInstance(context, m_resourceDesc, *m_writeOp, *m_readOp, m_pipelineCacheData, m_sharingMode);
			default:
				DE_ASSERT(0);
				return DE_NULL;
		}
	}

private:
	const ResourceDescription			m_resourceDesc;
	const UniquePtr<OperationSupport>	m_writeOp;
	const UniquePtr<OperationSupport>	m_readOp;
	const SyncPrimitive					m_syncPrimitive;
	const VkSharingMode					m_sharingMode;
	PipelineCacheData&					m_pipelineCacheData;
};

// Builds the test hierarchy: {fence,semaphore} x (write op, read op) x
// resource x {exclusive,concurrent sharing}.
void createTests (tcu::TestCaseGroup* group, PipelineCacheData* pipelineCacheData)
{
	tcu::TestContext& testCtx = group->getTestContext();

	static const struct
	{
		const char*		name;
		SyncPrimitive	syncPrimitive;
		int				numOptions;	// number of extra option passes (sharing-mode variants) beyond the base one
	} groups[] =
	{
		{ "fence",		SYNC_PRIMITIVE_FENCE,		1 },
		{ "semaphore",	SYNC_PRIMITIVE_SEMAPHORE,	1 }
	};

	for (int groupNdx = 0; groupNdx < DE_LENGTH_OF_ARRAY(groups); ++groupNdx)
	{
		MovePtr<tcu::TestCaseGroup> synchGroup (new tcu::TestCaseGroup(testCtx, groups[groupNdx].name, ""));

		for (int writeOpNdx = 0; writeOpNdx < DE_LENGTH_OF_ARRAY(s_writeOps); ++writeOpNdx)
		for (int readOpNdx = 0; readOpNdx < DE_LENGTH_OF_ARRAY(s_readOps); ++readOpNdx)
		{
			const OperationName	writeOp		= s_writeOps[writeOpNdx];
			const OperationName	readOp		= s_readOps[readOpNdx];
			const std::string	opGroupName	= getOperationName(writeOp) + "_" + getOperationName(readOp);
			bool				empty		= true;

			MovePtr<tcu::TestCaseGroup> opGroup	(new tcu::TestCaseGroup(testCtx, opGroupName.c_str(), ""));

			// optionNdx 0 = exclusive sharing, optionNdx 1 = concurrent sharing.
			for (int optionNdx = 0; optionNdx <= groups[groupNdx].numOptions; ++optionNdx)
			for (int resourceNdx = 0; resourceNdx < DE_LENGTH_OF_ARRAY(s_resources); ++resourceNdx)
			{
				const ResourceDescription&	resource	= s_resources[resourceNdx];
				std::string					name		= getResourceName(resource);
				VkSharingMode				sharingMode	= VK_SHARING_MODE_EXCLUSIVE;

				// queue family sharing mode used for resource
				if (optionNdx)
				{
					name += "_concurrent";
					sharingMode = VK_SHARING_MODE_CONCURRENT;
				}
				else
					name += "_exclusive";

				if (isResourceSupported(writeOp, resource) && isResourceSupported(readOp, resource))
				{
					opGroup->addChild(new BaseTestCase(testCtx, name, "", groups[groupNdx].syncPrimitive, resource, writeOp, readOp, sharingMode, *pipelineCacheData));
					empty = false;
				}
			}
			if (!empty)
				synchGroup->addChild(opGroup.release());
		}
		group->addChild(synchGroup.release());
	}
}

} // anonymous

tcu::TestCaseGroup* createSynchronizedOperationMultiQueueTests (tcu::TestContext& testCtx, PipelineCacheData& pipelineCacheData)
{
	return createTestGroup(testCtx, "multi_queue", "Synchronization of a memory-modifying operation", createTests, &pipelineCacheData);
}

} // synchronization
} // vkt