/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "MtlTestContext.h"

#include "GrContext.h"
#include "GrContextOptions.h"

#import <Metal/Metal.h>

#ifdef SK_METAL

// Helper macros for autorelease pools
#define SK_BEGIN_AUTORELEASE_BLOCK @autoreleasepool {
#define SK_END_AUTORELEASE_BLOCK   }

namespace {
/**
 * Implements sk_gpu_test::FenceSync for Metal.
 */

// TODO: write a Metal implementation. The code below is the Vulkan version,
// kept (compiled out) as a reference.
#if 0
class MtlFenceSync : public sk_gpu_test::FenceSync {
public:
    MtlFenceSync(sk_sp<const GrVkInterface> vk, VkDevice device, VkQueue queue,
                 uint32_t queueFamilyIndex)
            : fVk(std::move(vk))
            , fDevice(device)
            , fQueue(queue) {
        SkDEBUGCODE(fUnfinishedSyncs = 0;)
        VkCommandPoolCreateInfo createInfo;
        createInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
        createInfo.pNext = nullptr;
        createInfo.flags = 0;
        createInfo.queueFamilyIndex = queueFamilyIndex;
        GR_VK_CALL_ERRCHECK(fVk, CreateCommandPool(fDevice, &createInfo, nullptr, &fCommandPool));

        VkCommandBufferAllocateInfo allocateInfo;
        allocateInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
        allocateInfo.pNext = nullptr;
        allocateInfo.commandBufferCount = 1;
        allocateInfo.commandPool = fCommandPool;
        allocateInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
        GR_VK_CALL_ERRCHECK(fVk, AllocateCommandBuffers(fDevice, &allocateInfo, &fCommandBuffer));

        VkCommandBufferBeginInfo beginInfo;
        beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
        beginInfo.pNext = nullptr;
        beginInfo.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
        beginInfo.pInheritanceInfo = nullptr;
        GR_VK_CALL_ERRCHECK(fVk, BeginCommandBuffer(fCommandBuffer, &beginInfo));
        GR_VK_CALL_ERRCHECK(fVk, EndCommandBuffer(fCommandBuffer));
    }

    ~MtlFenceSync() override {
        SkASSERT(!fUnfinishedSyncs);
        // If the above assertion is true then the command buffer should not be in flight.
        GR_VK_CALL(fVk, FreeCommandBuffers(fDevice, fCommandPool, 1, &fCommandBuffer));
        GR_VK_CALL(fVk, DestroyCommandPool(fDevice, fCommandPool, nullptr));
    }

    sk_gpu_test::PlatformFence SK_WARN_UNUSED_RESULT insertFence() const override {
        VkFence fence;
        VkFenceCreateInfo info;
        info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
        info.pNext = nullptr;
        info.flags = 0;
        GR_VK_CALL_ERRCHECK(fVk, CreateFence(fDevice, &info, nullptr, &fence));
        VkSubmitInfo submitInfo;
        submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
        submitInfo.pNext = nullptr;
        submitInfo.waitSemaphoreCount = 0;
        submitInfo.pWaitSemaphores = nullptr;
        submitInfo.pWaitDstStageMask = nullptr;
        submitInfo.commandBufferCount = 1;
        submitInfo.pCommandBuffers = &fCommandBuffer;
        submitInfo.signalSemaphoreCount = 0;
        submitInfo.pSignalSemaphores = nullptr;
        GR_VK_CALL_ERRCHECK(fVk, QueueSubmit(fQueue, 1, &submitInfo, fence));
        SkDEBUGCODE(++fUnfinishedSyncs;)
        return (sk_gpu_test::PlatformFence)fence;
    }

    bool waitFence(sk_gpu_test::PlatformFence opaqueFence) const override {
        VkFence fence = (VkFence)opaqueFence;
        static constexpr uint64_t kForever = ~((uint64_t)0);
        auto result = GR_VK_CALL(fVk, WaitForFences(fDevice, 1, &fence, true, kForever));
        return result != VK_TIMEOUT;
    }

    void deleteFence(sk_gpu_test::PlatformFence opaqueFence) const override {
        VkFence fence = (VkFence)opaqueFence;
        GR_VK_CALL(fVk, DestroyFence(fDevice, fence, nullptr));
        SkDEBUGCODE(--fUnfinishedSyncs;)
    }

private:
    sk_sp<const GrVkInterface> fVk;
    VkDevice fDevice;
    VkQueue fQueue;
    VkCommandPool fCommandPool;
    VkCommandBuffer fCommandBuffer;
    SkDEBUGCODE(mutable int fUnfinishedSyncs;)
    typedef sk_gpu_test::FenceSync INHERITED;
};

GR_STATIC_ASSERT(sizeof(VkFence) <= sizeof(sk_gpu_test::PlatformFence));
#endif
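
// A minimal sketch of what the Metal version of the above might look like.
// Metal has no direct analogue of VkFence, so this sketch tracks completion
// via an empty MTLCommandBuffer instead: command buffers on a single queue
// begin execution in commit order, so waiting on one committed at fence
// insertion time waits for all previously committed work. The class name and
// the retain/release strategy are assumptions, not Skia's final implementation.
class MtlCommandBufferFenceSync : public sk_gpu_test::FenceSync {
public:
    explicit MtlCommandBufferFenceSync(id<MTLCommandQueue> queue) : fQueue(queue) {}

    sk_gpu_test::PlatformFence SK_WARN_UNUSED_RESULT insertFence() const override {
        // Commit an empty command buffer; its completion marks the fence.
        id<MTLCommandBuffer> cmdBuffer = [fQueue commandBuffer];
        [cmdBuffer commit];
        // Retain across the opaque-handle boundary; balanced in deleteFence().
        return (sk_gpu_test::PlatformFence)CFBridgingRetain(cmdBuffer);
    }

    bool waitFence(sk_gpu_test::PlatformFence opaqueFence) const override {
        id<MTLCommandBuffer> cmdBuffer = (__bridge id<MTLCommandBuffer>)(void*)opaqueFence;
        [cmdBuffer waitUntilCompleted];
        return MTLCommandBufferStatusError != cmdBuffer.status;
    }

    void deleteFence(sk_gpu_test::PlatformFence opaqueFence) const override {
        // Balances the CFBridgingRetain() in insertFence().
        CFRelease((CFTypeRef)opaqueFence);
    }

private:
    id<MTLCommandQueue> fQueue;

    typedef sk_gpu_test::FenceSync INHERITED;
};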

class MtlTestContext : public sk_gpu_test::TestContext {
public:
    static MtlTestContext* Create(TestContext* sharedContext) {
        SK_BEGIN_AUTORELEASE_BLOCK
        SkASSERT(!sharedContext);
        id<MTLDevice> device = MTLCreateSystemDefaultDevice();
        id<MTLCommandQueue> queue = [device newCommandQueue];

        return new MtlTestContext(device, queue);
        SK_END_AUTORELEASE_BLOCK
    }

    ~MtlTestContext() override { this->teardown(); }

    GrBackendApi backend() override { return GrBackendApi::kMetal; }

    void testAbandon() override {}

    // There is really nothing to do here since we don't own any unqueued command buffers.
    void submit() override {}

    void finish() override {}

    sk_sp<GrContext> makeGrContext(const GrContextOptions& options) override {
        // Ownership of the device and queue is transferred to the GrContext.
        return GrContext::MakeMetal((__bridge_retained void*)fDevice,
                                    (__bridge_retained void*)fQueue,
                                    options);
    }

private:
    MtlTestContext(id<MTLDevice> device, id<MTLCommandQueue> queue)
            : fDevice(device), fQueue(queue) {
        fFenceSync.reset(nullptr);  // TODO: install a Metal FenceSync (see above).
    }

    void onPlatformMakeCurrent() const override {}
    std::function<void()> onPlatformGetAutoContextRestore() const override { return nullptr; }
    void onPlatformSwapBuffers() const override {}

    id<MTLDevice> fDevice;
    id<MTLCommandQueue> fQueue;

    typedef sk_gpu_test::TestContext INHERITED;
};

}  // anonymous namespace

namespace sk_gpu_test {

TestContext* CreatePlatformMtlTestContext(TestContext* sharedContext) {
    return MtlTestContext::Create(sharedContext);
}
}  // namespace sk_gpu_test

#endif
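
// For reference, a sketch of how a harness might exercise this factory
// (hypothetical driver code, not part of this file's API):
//
//     sk_gpu_test::TestContext* testCtx =
//             sk_gpu_test::CreatePlatformMtlTestContext(nullptr);
//     if (testCtx) {
//         sk_sp<GrContext> grCtx = testCtx->makeGrContext(GrContextOptions());
//         SkASSERT(grCtx);
//         delete testCtx;
//     }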