/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkCommandBuffer.h"

#include "GrVkFramebuffer.h"
#include "GrVkImageView.h"
#include "GrVkRenderPass.h"
#include "GrVkRenderTarget.h"
#include "GrVkProgram.h"
#include "GrVkTransferBuffer.h"
#include "GrVkUtil.h"

GrVkCommandBuffer* GrVkCommandBuffer::Create(const GrVkGpu* gpu, VkCommandPool cmdPool) {
    const VkCommandBufferAllocateInfo cmdInfo = {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
        NULL,                                             // pNext
        cmdPool,                                          // commandPool
        VK_COMMAND_BUFFER_LEVEL_PRIMARY,                  // level
        1                                                 // commandBufferCount
    };

    VkCommandBuffer cmdBuffer;
    VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateCommandBuffers(gpu->device(),
                                                                         &cmdInfo,
                                                                         &cmdBuffer));
    if (err) {
        return nullptr;
    }
    return new GrVkCommandBuffer(cmdBuffer);
}

GrVkCommandBuffer::~GrVkCommandBuffer() {
    // Should have ended any render pass we're in the middle of
    SkASSERT(!fActiveRenderPass);
}

void GrVkCommandBuffer::invalidateState() {
    // Forget cached vertex/index buffer bindings so the next binds are re-issued.
    fBoundVertexBuffer = 0;
    fBoundVertexBufferIsValid = false;
    fBoundIndexBuffer = 0;
    fBoundIndexBufferIsValid = false;
}

void GrVkCommandBuffer::freeGPUData(const GrVkGpu* gpu) const {
    SkASSERT(!fIsActive);
    SkASSERT(!fActiveRenderPass);
    // Drop the refs this command buffer holds on every resource it recorded.
    for (int i = 0; i < fTrackedResources.count(); ++i) {
        fTrackedResources[i]->unref(gpu);
    }

    // Destroy the fence, if any
    if (VK_NULL_HANDLE != fSubmitFence) {
        GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
    }

    GR_VK_CALL(gpu->vkInterface(), FreeCommandBuffers(gpu->device(), gpu->cmdPool(),
                                                      1, &fCmdBuffer));
}

void GrVkCommandBuffer::abandonSubResources() const {
    for (int i = 0; i < fTrackedResources.count(); ++i) {
        fTrackedResources[i]->unrefAndAbandon();
    }
}

void GrVkCommandBuffer::begin(const GrVkGpu* gpu) {
    SkASSERT(!fIsActive);
    // Each command buffer is recorded for a single submission.
    VkCommandBufferBeginInfo cmdBufferBeginInfo;
    memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
    cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    cmdBufferBeginInfo.pNext = nullptr;
    cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    cmdBufferBeginInfo.pInheritanceInfo = nullptr;

    GR_VK_CALL_ERRCHECK(gpu->vkInterface(), BeginCommandBuffer(fCmdBuffer,
                                                               &cmdBufferBeginInfo));
    fIsActive = true;
}

void GrVkCommandBuffer::end(const GrVkGpu* gpu) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    GR_VK_CALL_ERRCHECK(gpu->vkInterface(), EndCommandBuffer(fCmdBuffer));
    this->invalidateState();
    fIsActive = false;
}

///////////////////////////////////////////////////////////////////////////////

void GrVkCommandBuffer::beginRenderPass(const GrVkGpu* gpu,
                                        const GrVkRenderPass* renderPass,
                                        const GrVkRenderTarget& target) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    VkRenderPassBeginInfo beginInfo;
    VkSubpassContents contents;
    renderPass->getBeginInfo(target, &beginInfo, &contents);
    GR_VK_CALL(gpu->vkInterface(), CmdBeginRenderPass(fCmdBuffer, &beginInfo, contents));
    fActiveRenderPass = renderPass;
    // Keep the render pass and the target's attachments alive until this buffer finishes.
    this->addResource(renderPass);
    target.addResources(*this);
}

void GrVkCommandBuffer::endRenderPass(const GrVkGpu* gpu) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    GR_VK_CALL(gpu->vkInterface(), CmdEndRenderPass(fCmdBuffer));
    fActiveRenderPass = nullptr;
}

void GrVkCommandBuffer::submitToQueue(const GrVkGpu* gpu, VkQueue queue, GrVkGpu::SyncQueue sync) {
    SkASSERT(!fIsActive);

    // Create a fence for this submission so finished() can poll for completion.
    VkResult err;
    VkFenceCreateInfo fenceInfo;
    memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    err = GR_VK_CALL(gpu->vkInterface(), CreateFence(gpu->device(), &fenceInfo, nullptr,
                                                     &fSubmitFence));
    SkASSERT(!err);

    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.pNext = nullptr;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitSemaphores = nullptr;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &fCmdBuffer;
    submitInfo.signalSemaphoreCount = 0;
    submitInfo.pSignalSemaphores = nullptr;
    GR_VK_CALL_ERRCHECK(gpu->vkInterface(), QueueSubmit(queue, 1, &submitInfo, fSubmitFence));

    if (GrVkGpu::kForce_SyncQueue == sync) {
        // Block until the GPU has finished with this command buffer.
        err = GR_VK_CALL(gpu->vkInterface(),
                         WaitForFences(gpu->device(), 1, &fSubmitFence, true, UINT64_MAX));
        if (VK_TIMEOUT == err) {
            SkDebugf("Fence failed to signal: %d\n", err);
            SkFAIL("failing");
        }
        SkASSERT(!err);

        // Destroy the fence
        GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
        fSubmitFence = VK_NULL_HANDLE;
    }
}

bool GrVkCommandBuffer::finished(const GrVkGpu* gpu) const {
    // No outstanding submit fence; nothing to wait on.
    if (VK_NULL_HANDLE == fSubmitFence) {
        return true;
    }

    // Non-blocking query of the submit fence.
    VkResult err = GR_VK_CALL(gpu->vkInterface(), GetFenceStatus(gpu->device(), fSubmitFence));
    switch (err) {
        case VK_SUCCESS:
            return true;

        case VK_NOT_READY:
            return false;

        default:
            SkDebugf("Error getting fence status: %d\n", err);
            SkFAIL("failing");
            break;
    }

    return false;
}

////////////////////////////////////////////////////////////////////////////////
// CommandBuffer commands
////////////////////////////////////////////////////////////////////////////////

void GrVkCommandBuffer::pipelineBarrier(const GrVkGpu* gpu,
                                        VkPipelineStageFlags srcStageMask,
                                        VkPipelineStageFlags dstStageMask,
                                        bool byRegion,
                                        BarrierType barrierType,
                                        void* barrier) const {
    SkASSERT(fIsActive);
    VkDependencyFlags dependencyFlags = byRegion ? VK_DEPENDENCY_BY_REGION_BIT : 0;

    switch (barrierType) {
        case kMemory_BarrierType: {
            const VkMemoryBarrier* barrierPtr = reinterpret_cast<VkMemoryBarrier*>(barrier);
            GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(fCmdBuffer, srcStageMask,
                                                              dstStageMask, dependencyFlags,
                                                              1, barrierPtr,
                                                              0, nullptr,
                                                              0, nullptr));
            break;
        }

        case kBufferMemory_BarrierType: {
            const VkBufferMemoryBarrier* barrierPtr =
                    reinterpret_cast<VkBufferMemoryBarrier*>(barrier);
            GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(fCmdBuffer, srcStageMask,
                                                              dstStageMask, dependencyFlags,
                                                              0, nullptr,
                                                              1, barrierPtr,
                                                              0, nullptr));
            break;
        }

        case kImageMemory_BarrierType: {
            const VkImageMemoryBarrier* barrierPtr =
                    reinterpret_cast<VkImageMemoryBarrier*>(barrier);
            GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(fCmdBuffer, srcStageMask,
                                                              dstStageMask, dependencyFlags,
                                                              0, nullptr,
                                                              0, nullptr,
                                                              1, barrierPtr));
            break;
        }
    }
}

void GrVkCommandBuffer::copyImage(const GrVkGpu* gpu,
                                  GrVkImage* srcImage,
                                  VkImageLayout srcLayout,
                                  GrVkImage* dstImage,
                                  VkImageLayout dstLayout,
                                  uint32_t copyRegionCount,
                                  const VkImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addResource(srcImage->resource());
    this->addResource(dstImage->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyImage(fCmdBuffer,
                                                srcImage->textureImage(),
                                                srcLayout,
                                                dstImage->textureImage(),
                                                dstLayout,
                                                copyRegionCount,
                                                copyRegions));
}

void GrVkCommandBuffer::copyImageToBuffer(const GrVkGpu* gpu,
                                          GrVkImage* srcImage,
                                          VkImageLayout srcLayout,
                                          GrVkTransferBuffer* dstBuffer,
                                          uint32_t copyRegionCount,
                                          const VkBufferImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addResource(srcImage->resource());
    this->addResource(dstBuffer->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyImageToBuffer(fCmdBuffer,
                                                        srcImage->textureImage(),
                                                        srcLayout,
                                                        dstBuffer->buffer(),
                                                        copyRegionCount,
                                                        copyRegions));
}

void GrVkCommandBuffer::copyBufferToImage(const GrVkGpu* gpu,
                                          GrVkTransferBuffer* srcBuffer,
                                          GrVkImage* dstImage,
                                          VkImageLayout dstLayout,
                                          uint32_t copyRegionCount,
                                          const VkBufferImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addResource(srcBuffer->resource());
    this->addResource(dstImage->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyBufferToImage(fCmdBuffer,
                                                        srcBuffer->buffer(),
                                                        dstImage->textureImage(),
                                                        dstLayout,
                                                        copyRegionCount,
                                                        copyRegions));
}

void GrVkCommandBuffer::clearColorImage(const GrVkGpu* gpu,
                                        GrVkImage* image,
                                        const VkClearColorValue* color,
                                        uint32_t subRangeCount,
                                        const VkImageSubresourceRange* subRanges) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addResource(image->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdClearColorImage(fCmdBuffer,
                                                      image->textureImage(),
                                                      image->currentLayout(),
                                                      color,
                                                      subRangeCount,
                                                      subRanges));
}

void GrVkCommandBuffer::clearAttachments(const GrVkGpu* gpu,
                                         int numAttachments,
                                         const VkClearAttachment* attachments,
                                         int numRects,
                                         const VkClearRect* clearRects) const {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(numAttachments > 0);
    SkASSERT(numRects > 0);
#ifdef SK_DEBUG
    for (int i = 0; i < numAttachments; ++i) {
        if (attachments[i].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
            uint32_t testIndex;
            SkAssertResult(fActiveRenderPass->colorAttachmentIndex(&testIndex));
            SkASSERT(testIndex == attachments[i].colorAttachment);
        }
    }
#endif
    GR_VK_CALL(gpu->vkInterface(), CmdClearAttachments(fCmdBuffer,
                                                       numAttachments,
                                                       attachments,
                                                       numRects,
                                                       clearRects));
}

void GrVkCommandBuffer::bindDescriptorSets(const GrVkGpu* gpu,
                                           GrVkProgram* program,
                                           VkPipelineLayout layout,
                                           uint32_t firstSet,
                                           uint32_t setCount,
                                           const VkDescriptorSet* descriptorSets,
                                           uint32_t dynamicOffsetCount,
                                           const uint32_t* dynamicOffsets) {
    SkASSERT(fIsActive);
    GR_VK_CALL(gpu->vkInterface(), CmdBindDescriptorSets(fCmdBuffer,
                                                         VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                         layout,
                                                         firstSet,
                                                         setCount,
                                                         descriptorSets,
                                                         dynamicOffsetCount,
                                                         dynamicOffsets));
    program->addUniformResources(*this);
}

void GrVkCommandBuffer::drawIndexed(const GrVkGpu* gpu,
                                    uint32_t indexCount,
                                    uint32_t instanceCount,
                                    uint32_t firstIndex,
                                    int32_t vertexOffset,
                                    uint32_t firstInstance) const {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    GR_VK_CALL(gpu->vkInterface(), CmdDrawIndexed(fCmdBuffer,
                                                  indexCount,
                                                  instanceCount,
                                                  firstIndex,
                                                  vertexOffset,
                                                  firstInstance));
}

void GrVkCommandBuffer::draw(const GrVkGpu* gpu,
                             uint32_t vertexCount,
                             uint32_t instanceCount,
                             uint32_t firstVertex,
                             uint32_t firstInstance) const {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    GR_VK_CALL(gpu->vkInterface(), CmdDraw(fCmdBuffer,
                                           vertexCount,
                                           instanceCount,
                                           firstVertex,
                                           firstInstance));
}
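
////////////////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative only): a typical sequence a caller such as GrVkGpu
// might follow with the entry points defined above. renderPass, renderTarget,
// queue, and vertexCount are placeholders supplied by the caller, not names
// defined in this file.
//
//   GrVkCommandBuffer* cb = GrVkCommandBuffer::Create(gpu, gpu->cmdPool());
//   cb->begin(gpu);                                        // start recording
//   cb->beginRenderPass(gpu, renderPass, *renderTarget);
//   cb->draw(gpu, vertexCount, 1, 0, 0);                   // record draws, clears, copies
//   cb->endRenderPass(gpu);
//   cb->end(gpu);                                          // no further recording allowed
//   cb->submitToQueue(gpu, queue, GrVkGpu::kForce_SyncQueue);  // blocks until the GPU is done
//   SkASSERT(cb->finished(gpu));                           // fence was already waited on
//   cb->freeGPUData(gpu);                                  // unref tracked resources, free buffer
////////////////////////////////////////////////////////////////////////////////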