/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "VkTestContext.h"

#ifdef SK_VULKAN

#include "GrContext.h"
#include "VkTestUtils.h"
#include "vk/GrVkInterface.h"
#include "vk/GrVkUtil.h"

namespace {
/**
 * Implements sk_gpu_test::FenceSync for Vulkan. It creates a single command
 * buffer with USAGE_SIMULTANEOUS with no content. On every insertFence request
 * it submits the command buffer with a new fence.
 */
class VkFenceSync : public sk_gpu_test::FenceSync {
public:
    // Creates a command pool for queueFamilyIndex and records one empty,
    // reusable primary command buffer. SIMULTANEOUS_USE lets the same buffer
    // be pending on the queue multiple times at once, which insertFence()
    // relies on.
    VkFenceSync(sk_sp<const GrVkInterface> vk, VkDevice device, VkQueue queue,
                uint32_t queueFamilyIndex)
            : fVk(std::move(vk))
            , fDevice(device)
            , fQueue(queue) {
        SkDEBUGCODE(fUnfinishedSyncs = 0;)
        VkCommandPoolCreateInfo createInfo;
        createInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
        createInfo.pNext = nullptr;
        createInfo.flags = 0;
        createInfo.queueFamilyIndex = queueFamilyIndex;
        GR_VK_CALL_ERRCHECK(fVk, CreateCommandPool(fDevice, &createInfo, nullptr, &fCommandPool));

        VkCommandBufferAllocateInfo allocateInfo;
        allocateInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
        allocateInfo.pNext = nullptr;
        allocateInfo.commandBufferCount = 1;
        allocateInfo.commandPool = fCommandPool;
        allocateInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
        GR_VK_CALL_ERRCHECK(fVk, AllocateCommandBuffers(fDevice, &allocateInfo, &fCommandBuffer));

        // Record an intentionally empty command buffer: begin immediately
        // followed by end. Submitting it does no GPU work; only the fence
        // signal matters.
        VkCommandBufferBeginInfo beginInfo;
        beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
        beginInfo.pNext = nullptr;
        beginInfo.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
        beginInfo.pInheritanceInfo = nullptr;
        GR_VK_CALL_ERRCHECK(fVk, BeginCommandBuffer(fCommandBuffer, &beginInfo));
        GR_VK_CALL_ERRCHECK(fVk, EndCommandBuffer(fCommandBuffer));
    }

    ~VkFenceSync() override {
        SkASSERT(!fUnfinishedSyncs);
        // If the above assertion is true then the command buffer should not be in flight.
        GR_VK_CALL(fVk, FreeCommandBuffers(fDevice, fCommandPool, 1, &fCommandBuffer));
        GR_VK_CALL(fVk, DestroyCommandPool(fDevice, fCommandPool, nullptr));
    }

    // Submits the shared empty command buffer paired with a freshly created
    // fence. The fence signals once the queue has executed this submission,
    // i.e. once all work submitted to fQueue before this call has completed.
    // Caller owns the returned fence and must pass it to deleteFence().
    sk_gpu_test::PlatformFence SK_WARN_UNUSED_RESULT insertFence() const override {
        VkFence fence;
        VkFenceCreateInfo info;
        info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
        info.pNext = nullptr;
        info.flags = 0;
        GR_VK_CALL_ERRCHECK(fVk, CreateFence(fDevice, &info, nullptr, &fence));
        VkSubmitInfo submitInfo;
        submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
        submitInfo.pNext = nullptr;
        submitInfo.waitSemaphoreCount = 0;
        submitInfo.pWaitSemaphores = nullptr;
        submitInfo.pWaitDstStageMask = nullptr;
        submitInfo.commandBufferCount = 1;
        submitInfo.pCommandBuffers = &fCommandBuffer;
        submitInfo.signalSemaphoreCount = 0;
        submitInfo.pSignalSemaphores = nullptr;
        GR_VK_CALL_ERRCHECK(fVk, QueueSubmit(fQueue, 1, &submitInfo, fence));
        SkDEBUGCODE(++fUnfinishedSyncs;)
        return (sk_gpu_test::PlatformFence)fence;
    }

    // Blocks (with an effectively infinite timeout) until the fence signals.
    // Returns false only for VK_TIMEOUT.
    // NOTE(review): error results such as VK_ERROR_DEVICE_LOST also make this
    // return true -- confirm that is the intended contract.
    bool waitFence(sk_gpu_test::PlatformFence opaqueFence) const override {
        VkFence fence = (VkFence)opaqueFence;
        static constexpr uint64_t kForever = ~((uint64_t)0);
        auto result = GR_VK_CALL(fVk, WaitForFences(fDevice, 1, &fence, true, kForever));
        return result != VK_TIMEOUT;
    }

    // Destroys a fence previously returned by insertFence().
    void deleteFence(sk_gpu_test::PlatformFence opaqueFence) const override {
        VkFence fence = (VkFence)opaqueFence;
        GR_VK_CALL(fVk, DestroyFence(fDevice, fence, nullptr));
        SkDEBUGCODE(--fUnfinishedSyncs;)
    }

private:
    sk_sp<const GrVkInterface> fVk;
    VkDevice                   fDevice;
    VkQueue                    fQueue;
    VkCommandPool              fCommandPool;
    VkCommandBuffer            fCommandBuffer;
    // Debug-only count of fences inserted but not yet deleted; mutable
    // because insertFence()/deleteFence() are const.
    SkDEBUGCODE(mutable int fUnfinishedSyncs;)
    typedef sk_gpu_test::FenceSync INHERITED;
};

// A VkFence handle must fit inside the generic PlatformFence storage.
GR_STATIC_ASSERT(sizeof(VkFence) <= sizeof(sk_gpu_test::PlatformFence));

// TODO: Implement swap buffers and finish
buffers and finish 110 class VkTestContextImpl : public sk_gpu_test::VkTestContext { 111 public: 112 static VkTestContext* Create(VkTestContext* sharedContext) { 113 sk_sp<const GrVkBackendContext> backendContext; 114 if (sharedContext) { 115 backendContext = sharedContext->getVkBackendContext(); 116 } else { 117 PFN_vkGetInstanceProcAddr instProc; 118 PFN_vkGetDeviceProcAddr devProc; 119 if (!sk_gpu_test::LoadVkLibraryAndGetProcAddrFuncs(&instProc, &devProc)) { 120 return nullptr; 121 } 122 backendContext.reset(GrVkBackendContext::Create(instProc, devProc)); 123 } 124 if (!backendContext) { 125 return nullptr; 126 } 127 return new VkTestContextImpl(std::move(backendContext)); 128 } 129 130 ~VkTestContextImpl() override { this->teardown(); } 131 132 void testAbandon() override {} 133 134 // There is really nothing to here since we don't own any unqueued command buffers here. 135 void submit() override {} 136 137 void finish() override {} 138 139 sk_sp<GrContext> makeGrContext(const GrContextOptions& options) override { 140 return GrContext::MakeVulkan(fVk, options); 141 } 142 143 protected: 144 void teardown() override { 145 INHERITED::teardown(); 146 fVk.reset(nullptr); 147 } 148 149 private: 150 VkTestContextImpl(sk_sp<const GrVkBackendContext> backendContext) 151 : VkTestContext(std::move(backendContext)) { 152 fFenceSync.reset(new VkFenceSync(fVk->fInterface, fVk->fDevice, fVk->fQueue, 153 fVk->fGraphicsQueueIndex)); 154 } 155 156 void onPlatformMakeCurrent() const override {} 157 std::function<void()> onPlatformGetAutoContextRestore() const override { return nullptr; } 158 void onPlatformSwapBuffers() const override {} 159 160 typedef sk_gpu_test::VkTestContext INHERITED; 161 }; 162 } // anonymous namespace 163 164 namespace sk_gpu_test { 165 VkTestContext* CreatePlatformVkTestContext(VkTestContext* sharedContext) { 166 return VkTestContextImpl::Create(sharedContext); 167 } 168 } // namespace sk_gpu_test 169 170 #endif 171