Home | History | Annotate | Download | only in vk
      1 /*
      2 * Copyright 2016 Google Inc.
      3 *
      4 * Use of this source code is governed by a BSD-style license that can be
      5 * found in the LICENSE file.
      6 */
      7 
      8 #include "GrVkGpuCommandBuffer.h"
      9 
     10 #include "GrFixedClip.h"
     11 #include "GrMesh.h"
     12 #include "GrOpFlushState.h"
     13 #include "GrPipeline.h"
     14 #include "GrRenderTargetPriv.h"
     15 #include "GrTexturePriv.h"
     16 #include "GrVkCommandBuffer.h"
     17 #include "GrVkGpu.h"
     18 #include "GrVkPipeline.h"
     19 #include "GrVkRenderPass.h"
     20 #include "GrVkRenderTarget.h"
     21 #include "GrVkResourceProvider.h"
     22 #include "GrVkTexture.h"
     23 #include "SkRect.h"
     24 
     25 void GrVkGpuTextureCommandBuffer::copy(GrSurface* src, GrSurfaceOrigin srcOrigin,
     26                                        const SkIRect& srcRect, const SkIPoint& dstPoint) {
     27     fCopies.emplace_back(src, srcOrigin, srcRect, dstPoint);
     28 }
     29 
     30 void GrVkGpuTextureCommandBuffer::insertEventMarker(const char* msg) {
     31     // TODO: does Vulkan have a correlate?
     32 }
     33 
     34 void GrVkGpuTextureCommandBuffer::submit() {
     35     for (int i = 0; i < fCopies.count(); ++i) {
     36         CopyInfo& copyInfo = fCopies[i];
     37         fGpu->copySurface(fTexture, fOrigin, copyInfo.fSrc, copyInfo.fSrcOrigin, copyInfo.fSrcRect,
     38                           copyInfo.fDstPoint);
     39     }
     40 }
     41 
     42 GrVkGpuTextureCommandBuffer::~GrVkGpuTextureCommandBuffer() {}
     43 
     44 ////////////////////////////////////////////////////////////////////////////////
     45 
     46 void get_vk_load_store_ops(GrLoadOp loadOpIn, GrStoreOp storeOpIn,
     47                            VkAttachmentLoadOp* loadOp, VkAttachmentStoreOp* storeOp) {
     48     switch (loadOpIn) {
     49         case GrLoadOp::kLoad:
     50             *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
     51             break;
     52         case GrLoadOp::kClear:
     53             *loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
     54             break;
     55         case GrLoadOp::kDiscard:
     56             *loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
     57             break;
     58         default:
     59             SK_ABORT("Invalid LoadOp");
     60             *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
     61     }
     62 
     63     switch (storeOpIn) {
     64         case GrStoreOp::kStore:
     65             *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
     66             break;
     67         case GrStoreOp::kDiscard:
     68             *storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
     69             break;
     70         default:
     71             SK_ABORT("Invalid StoreOp");
     72             *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
     73     }
     74 }
     75 
     76 GrVkGpuRTCommandBuffer::GrVkGpuRTCommandBuffer(GrVkGpu* gpu,
     77                                                GrRenderTarget* rt, GrSurfaceOrigin origin,
     78                                                const LoadAndStoreInfo& colorInfo,
     79                                                const StencilLoadAndStoreInfo& stencilInfo)
     80         : INHERITED(rt, origin)
     81         , fGpu(gpu)
     82         , fClearColor(GrColor4f::FromGrColor(colorInfo.fClearColor))
     83         , fLastPipelineState(nullptr) {
     84     get_vk_load_store_ops(colorInfo.fLoadOp, colorInfo.fStoreOp,
     85                           &fVkColorLoadOp, &fVkColorStoreOp);
     86 
     87     get_vk_load_store_ops(stencilInfo.fLoadOp, stencilInfo.fStoreOp,
     88                           &fVkStencilLoadOp, &fVkStencilStoreOp);
     89     fCurrentCmdInfo = -1;
     90 
     91     this->init();
     92 }
     93 
// Creates the first CommandBufferInfo (render pass + secondary command buffer)
// using the load/store ops captured in the constructor, and begins recording.
void GrVkGpuRTCommandBuffer::init() {
    GrVkRenderPass::LoadStoreOps vkColorOps(fVkColorLoadOp, fVkColorStoreOp);
    GrVkRenderPass::LoadStoreOps vkStencilOps(fVkStencilLoadOp, fVkStencilStoreOp);

    CommandBufferInfo& cbInfo = fCommandBufferInfos.push_back();
    SkASSERT(fCommandBufferInfos.count() == 1);
    fCurrentCmdInfo = 0;

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
    // Prefer the render target's cached compatible-render-pass handle; fall back
    // to looking the pass up from the render target itself.
    const GrVkResourceProvider::CompatibleRPHandle& rpHandle = vkRT->compatibleRenderPassHandle();
    if (rpHandle.isValid()) {
        cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                     vkColorOps,
                                                                     vkStencilOps);
    } else {
        cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(*vkRT,
                                                                     vkColorOps,
                                                                     vkStencilOps);
    }

    // Stash the clear color in VkClearValue form for vkCmdBeginRenderPass.
    cbInfo.fColorClearValue.color.float32[0] = fClearColor.fRGBA[0];
    cbInfo.fColorClearValue.color.float32[1] = fClearColor.fRGBA[1];
    cbInfo.fColorClearValue.color.float32[2] = fClearColor.fRGBA[2];
    cbInfo.fColorClearValue.color.float32[3] = fClearColor.fRGBA[3];

    // A CLEAR load op touches every pixel, so the dirty bounds start as the whole
    // target; otherwise nothing has been rendered yet.
    if (VK_ATTACHMENT_LOAD_OP_CLEAR == fVkColorLoadOp) {
        cbInfo.fBounds = SkRect::MakeWH(vkRT->width(), vkRT->height());
    } else {
        cbInfo.fBounds.setEmpty();
    }

    // Record how this render pass begins; onClear/discard/copy consult this to
    // decide whether the pass can be retargeted or must be split.
    if (VK_ATTACHMENT_LOAD_OP_CLEAR == fVkColorLoadOp) {
        cbInfo.fLoadStoreState = LoadStoreState::kStartsWithClear;
    } else if (VK_ATTACHMENT_LOAD_OP_LOAD == fVkColorLoadOp &&
               VK_ATTACHMENT_STORE_OP_STORE == fVkColorStoreOp) {
        cbInfo.fLoadStoreState = LoadStoreState::kLoadAndStore;
    } else if (VK_ATTACHMENT_LOAD_OP_DONT_CARE == fVkColorLoadOp) {
        cbInfo.fLoadStoreState = LoadStoreState::kStartsWithDiscard;
    }

    cbInfo.fCommandBuffers.push_back(fGpu->resourceProvider().findOrCreateSecondaryCommandBuffer());
    cbInfo.currentCmdBuf()->begin(fGpu, vkRT->framebuffer(), cbInfo.fRenderPass);
}
    137 
    138 
    139 GrVkGpuRTCommandBuffer::~GrVkGpuRTCommandBuffer() {
    140     for (int i = 0; i < fCommandBufferInfos.count(); ++i) {
    141         CommandBufferInfo& cbInfo = fCommandBufferInfos[i];
    142         for (int j = 0; j < cbInfo.fCommandBuffers.count(); ++j) {
    143             cbInfo.fCommandBuffers[j]->unref(fGpu);
    144         }
    145         cbInfo.fRenderPass->unref(fGpu);
    146     }
    147 }
    148 
    149 GrGpu* GrVkGpuRTCommandBuffer::gpu() { return fGpu; }
    150 
    151 void GrVkGpuRTCommandBuffer::end() {
    152     if (fCurrentCmdInfo >= 0) {
    153         fCommandBufferInfos[fCurrentCmdInfo].currentCmdBuf()->end(fGpu);
    154     }
    155 }
    156 
// Executes everything recorded so far: per-render-pass pre-work (inline uploads
// and copies), image layout transitions, and finally each batch of secondary
// command buffers wrapped in its render pass.
void GrVkGpuRTCommandBuffer::submit() {
    if (!fRenderTarget) {
        return;
    }

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
    // When multisampled we render into the MSAA image, not the resolve target.
    GrVkImage* targetImage = vkRT->msaaImage() ? vkRT->msaaImage() : vkRT;
    GrStencilAttachment* stencil = fRenderTarget->renderTargetPriv().getStencilAttachment();

    for (int i = 0; i < fCommandBufferInfos.count(); ++i) {
        CommandBufferInfo& cbInfo = fCommandBufferInfos[i];

        // Run deferred inline uploads that must land before this render pass.
        for (int j = 0; j < cbInfo.fPreDrawUploads.count(); ++j) {
            InlineUploadInfo& iuInfo = cbInfo.fPreDrawUploads[j];
            iuInfo.fFlushState->doUpload(iuInfo.fUpload);
        }

        // Run deferred surface copies that must land before this render pass.
        for (int j = 0; j < cbInfo.fPreCopies.count(); ++j) {
            CopyInfo& copyInfo = cbInfo.fPreCopies[j];
            fGpu->copySurface(fRenderTarget, fOrigin, copyInfo.fSrc, copyInfo.fSrcOrigin,
                              copyInfo.fSrcRect, copyInfo.fDstPoint, copyInfo.fShouldDiscardDst);
        }

        // Make sure we do the following layout changes after all copies, uploads, or any other pre
        // work is done since we may change the layouts in the pre-work. Also since the draws will
        // be submitted in different render passes, we need to guard against write-after-write
        // hazards between passes.

        // Change layout of our render target so it can be used as the color attachment.
        targetImage->setImageLayout(fGpu,
                                    VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                                    VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                                    VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
                                    false);

        // If we are using a stencil attachment we also need to update its layout
        if (stencil) {
            GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
            vkStencil->setImageLayout(fGpu,
                                      VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
                                      VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
                                      false);
        }

        // If we have any sampled images set their layout now.
        for (int j = 0; j < cbInfo.fSampledImages.count(); ++j) {
            cbInfo.fSampledImages[j]->setImageLayout(fGpu,
                                                     VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                                     VK_ACCESS_SHADER_READ_BIT,
                                                     VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
                                                     false);
        }

        // TODO: We can't add this optimization yet since many things create a scratch texture which
        // adds the discard immediately, but then don't draw to it right away. This causes the
        // discard to be ignored and we get yelled at for loading uninitialized data. However, once
        // MDB lands, the discard will get reordered with the rest of the draw commands and we can
        // re-enable this.
#if 0
        if (cbInfo.fIsEmpty && cbInfo.fLoadStoreState != kStartsWithClear) {
            // We have submitted no actual draw commands to the command buffer and we are not using
            // the render pass to do a clear so there is no need to submit anything.
            continue;
        }
#endif
        // Only submit if the recorded bounds actually intersect the target; an
        // empty intersection means this pass touched nothing visible.
        if (cbInfo.fBounds.intersect(0, 0,
                                     SkIntToScalar(fRenderTarget->width()),
                                     SkIntToScalar(fRenderTarget->height()))) {
            SkIRect iBounds;
            cbInfo.fBounds.roundOut(&iBounds);

            fGpu->submitSecondaryCommandBuffer(cbInfo.fCommandBuffers, cbInfo.fRenderPass,
                                               &cbInfo.fColorClearValue, vkRT, fOrigin, iBounds);
        }
    }
}
    234 
// Marks the target's prior contents as irrelevant. If nothing has been recorded
// yet we can simply retarget the render pass to a DONT_CARE load op; otherwise
// this is a no-op (draws already recorded still need their pass).
void GrVkGpuRTCommandBuffer::discard() {
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);

    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
    if (cbInfo.fIsEmpty) {
        // Change the render pass to do a don't-care load for both color & stencil
        GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_DONT_CARE,
                                                VK_ATTACHMENT_STORE_OP_STORE);
        GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_DONT_CARE,
                                                  VK_ATTACHMENT_STORE_OP_STORE);

        const GrVkRenderPass* oldRP = cbInfo.fRenderPass;

        const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
            vkRT->compatibleRenderPassHandle();
        if (rpHandle.isValid()) {
            cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                         vkColorOps,
                                                                         vkStencilOps);
        } else {
            cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(*vkRT,
                                                                         vkColorOps,
                                                                         vkStencilOps);
        }

        // Only the load/store ops changed, so the passes must stay compatible.
        // Drop our reference on the pass we replaced.
        SkASSERT(cbInfo.fRenderPass->isCompatible(*oldRP));
        oldRP->unref(fGpu);
        // A discard logically touches the whole render target.
        cbInfo.fBounds.join(fRenderTarget->getBoundsRect());
        cbInfo.fLoadStoreState = LoadStoreState::kStartsWithDiscard;
        // If we are going to discard the whole render target then the results of any copies we did
        // immediately before to the target won't matter, so just drop them.
        cbInfo.fPreCopies.reset();
    }
}
    269 
    270 void GrVkGpuRTCommandBuffer::insertEventMarker(const char* msg) {
    271     // TODO: does Vulkan have a correlate?
    272 }
    273 
// Clears the stencil attachment (within the clip's scissor, if any) using
// vkCmdClearAttachments inside the current render pass.
void GrVkGpuRTCommandBuffer::onClearStencilClip(const GrFixedClip& clip, bool insideStencilMask) {
    SkASSERT(!clip.hasWindowRectangles());

    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];

    GrStencilAttachment* sb = fRenderTarget->renderTargetPriv().getStencilAttachment();
    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = sb->bits();

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
    if (insideStencilMask) {
        // Set only the top (clip) bit of the stencil.
        vkStencilColor.stencil = (1 << (stencilBitCount - 1));
    } else {
        vkStencilColor.stencil = 0;
    }

    VkClearRect clearRect;
    // Flip rect if necessary: bottom-left-origin targets must be mirrored into
    // Vulkan's top-left device coordinates.
    SkIRect vkRect;
    if (!clip.scissorEnabled()) {
        vkRect.setXYWH(0, 0, fRenderTarget->width(), fRenderTarget->height());
    } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
        vkRect = clip.scissorRect();
    } else {
        const SkIRect& scissor = clip.scissorRect();
        vkRect.setLTRB(scissor.fLeft, fRenderTarget->height() - scissor.fBottom,
                       scissor.fRight, fRenderTarget->height() - scissor.fTop);
    }

    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };

    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    // The render pass must actually have a stencil attachment for this clear.
    uint32_t stencilIndex;
    SkAssertResult(cbInfo.fRenderPass->stencilAttachmentIndex(&stencilIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    attachment.colorAttachment = 0; // this value shouldn't matter
    attachment.clearValue.depthStencil = vkStencilColor;

    cbInfo.currentCmdBuf()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    cbInfo.fIsEmpty = false;

    // Update command buffer bounds
    if (!clip.scissorEnabled()) {
        cbInfo.fBounds.join(fRenderTarget->getBoundsRect());
    } else {
        cbInfo.fBounds.join(SkRect::Make(clip.scissorRect()));
    }
}
    333 
// Clears the color attachment. An unscissored clear on an empty render pass is
// folded into the pass itself (CLEAR load op); otherwise we record a
// vkCmdClearAttachments into the current secondary command buffer.
void GrVkGpuRTCommandBuffer::onClear(const GrFixedClip& clip, GrColor color) {
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);

    // parent class should never let us get here with no RT
    SkASSERT(!clip.hasWindowRectangles());

    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];

    VkClearColorValue vkColor;
    GrColorToRGBAFloat(color, vkColor.float32);

    if (cbInfo.fIsEmpty && !clip.scissorEnabled()) {
        // Change the render pass to do a clear load
        GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_CLEAR,
                                                VK_ATTACHMENT_STORE_OP_STORE);
        // Preserve the stencil buffer's load & store settings
        GrVkRenderPass::LoadStoreOps vkStencilOps(fVkStencilLoadOp, fVkStencilStoreOp);

        const GrVkRenderPass* oldRP = cbInfo.fRenderPass;

        const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
            vkRT->compatibleRenderPassHandle();
        if (rpHandle.isValid()) {
            cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                         vkColorOps,
                                                                         vkStencilOps);
        } else {
            cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(*vkRT,
                                                                         vkColorOps,
                                                                         vkStencilOps);
        }

        // Only the load/store ops changed, so the passes must stay compatible.
        // Drop our reference on the pass we replaced.
        SkASSERT(cbInfo.fRenderPass->isCompatible(*oldRP));
        oldRP->unref(fGpu);

        // The pass now performs the clear, with this color, at begin time.
        GrColorToRGBAFloat(color, cbInfo.fColorClearValue.color.float32);
        cbInfo.fLoadStoreState = LoadStoreState::kStartsWithClear;
        // If we are going to clear the whole render target then the results of any copies we did
        // immediately before to the target won't matter, so just drop them.
        cbInfo.fPreCopies.reset();

        // Update command buffer bounds
        cbInfo.fBounds.join(fRenderTarget->getBoundsRect());
        return;
    }

    // We always do a sub rect clear with clearAttachments since we are inside a render pass
    VkClearRect clearRect;
    // Flip rect if necessary: bottom-left-origin targets must be mirrored into
    // Vulkan's top-left device coordinates.
    SkIRect vkRect;
    if (!clip.scissorEnabled()) {
        vkRect.setXYWH(0, 0, fRenderTarget->width(), fRenderTarget->height());
    } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
        vkRect = clip.scissorRect();
    } else {
        const SkIRect& scissor = clip.scissorRect();
        vkRect.setLTRB(scissor.fLeft, fRenderTarget->height() - scissor.fBottom,
                       scissor.fRight, fRenderTarget->height() - scissor.fTop);
    }
    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    uint32_t colorIndex;
    SkAssertResult(cbInfo.fRenderPass->colorAttachmentIndex(&colorIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    attachment.colorAttachment = colorIndex;
    attachment.clearValue.color = vkColor;

    cbInfo.currentCmdBuf()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    cbInfo.fIsEmpty = false;

    // Update command buffer bounds
    if (!clip.scissorEnabled()) {
        cbInfo.fBounds.join(fRenderTarget->getBoundsRect());
    } else {
        cbInfo.fBounds.join(SkRect::Make(clip.scissorRect()));
    }
    return;
}
    417 
    418 ////////////////////////////////////////////////////////////////////////////////
    419 
    420 void GrVkGpuRTCommandBuffer::addAdditionalCommandBuffer() {
    421     GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
    422 
    423     CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
    424     cbInfo.currentCmdBuf()->end(fGpu);
    425     cbInfo.fCommandBuffers.push_back(fGpu->resourceProvider().findOrCreateSecondaryCommandBuffer());
    426     cbInfo.currentCmdBuf()->begin(fGpu, vkRT->framebuffer(), cbInfo.fRenderPass);
    427 }
    428 
// Closes the current render pass and begins a brand new one that loads and
// stores the attachment contents, so earlier results are preserved across the
// pass boundary.
void GrVkGpuRTCommandBuffer::addAdditionalRenderPass() {
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);

    fCommandBufferInfos[fCurrentCmdInfo].currentCmdBuf()->end(fGpu);

    // NOTE: push_back may reallocate fCommandBufferInfos; callers must not hold
    // references to earlier entries across this call.
    CommandBufferInfo& cbInfo = fCommandBufferInfos.push_back();
    fCurrentCmdInfo++;

    // The new pass must keep whatever the previous passes rendered: load & store.
    GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                            VK_ATTACHMENT_STORE_OP_STORE);
    GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                              VK_ATTACHMENT_STORE_OP_STORE);

    const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
            vkRT->compatibleRenderPassHandle();
    if (rpHandle.isValid()) {
        cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                     vkColorOps,
                                                                     vkStencilOps);
    } else {
        cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(*vkRT,
                                                                     vkColorOps,
                                                                     vkStencilOps);
    }
    cbInfo.fLoadStoreState = LoadStoreState::kLoadAndStore;

    cbInfo.fCommandBuffers.push_back(fGpu->resourceProvider().findOrCreateSecondaryCommandBuffer());
    // It shouldn't matter what we set the clear color to here since we will assume loading of the
    // attachment.
    memset(&cbInfo.fColorClearValue, 0, sizeof(VkClearValue));
    cbInfo.fBounds.setEmpty();

    cbInfo.currentCmdBuf()->begin(fGpu, vkRT->framebuffer(), cbInfo.fRenderPass);
}
    463 
    464 void GrVkGpuRTCommandBuffer::inlineUpload(GrOpFlushState* state,
    465                                           GrDeferredTextureUploadFn& upload) {
    466     if (!fCommandBufferInfos[fCurrentCmdInfo].fIsEmpty) {
    467         this->addAdditionalRenderPass();
    468     }
    469     fCommandBufferInfos[fCurrentCmdInfo].fPreDrawUploads.emplace_back(state, upload);
    470 }
    471 
    472 void GrVkGpuRTCommandBuffer::copy(GrSurface* src, GrSurfaceOrigin srcOrigin, const SkIRect& srcRect,
    473                                   const SkIPoint& dstPoint) {
    474     CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
    475     if (!cbInfo.fIsEmpty || LoadStoreState::kStartsWithClear == cbInfo.fLoadStoreState) {
    476         this->addAdditionalRenderPass();
    477     }
    478 
    479     fCommandBufferInfos[fCurrentCmdInfo].fPreCopies.emplace_back(
    480             src, srcOrigin, srcRect, dstPoint,
    481             LoadStoreState::kStartsWithDiscard == cbInfo.fLoadStoreState);
    482 
    483     if (LoadStoreState::kLoadAndStore != cbInfo.fLoadStoreState) {
    484         // Change the render pass to do a load and store so we don't lose the results of our copy
    485         GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_LOAD,
    486                                                 VK_ATTACHMENT_STORE_OP_STORE);
    487         GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_LOAD,
    488                                                   VK_ATTACHMENT_STORE_OP_STORE);
    489 
    490         const GrVkRenderPass* oldRP = cbInfo.fRenderPass;
    491 
    492         GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
    493         const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
    494                 vkRT->compatibleRenderPassHandle();
    495         if (rpHandle.isValid()) {
    496             cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
    497                                                                          vkColorOps,
    498                                                                          vkStencilOps);
    499         } else {
    500             cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(*vkRT,
    501                                                                          vkColorOps,
    502                                                                          vkStencilOps);
    503         }
    504         SkASSERT(cbInfo.fRenderPass->isCompatible(*oldRP));
    505         oldRP->unref(fGpu);
    506 
    507         cbInfo.fLoadStoreState = LoadStoreState::kLoadAndStore;
    508 
    509     }
    510 }
    511 
    512 ////////////////////////////////////////////////////////////////////////////////
    513 
    514 void GrVkGpuRTCommandBuffer::bindGeometry(const GrPrimitiveProcessor& primProc,
    515                                           const GrBuffer* indexBuffer,
    516                                           const GrBuffer* vertexBuffer,
    517                                           const GrBuffer* instanceBuffer) {
    518     GrVkSecondaryCommandBuffer* currCmdBuf = fCommandBufferInfos[fCurrentCmdInfo].currentCmdBuf();
    519     // There is no need to put any memory barriers to make sure host writes have finished here.
    520     // When a command buffer is submitted to a queue, there is an implicit memory barrier that
    521     // occurs for all host writes. Additionally, BufferMemoryBarriers are not allowed inside of
    522     // an active RenderPass.
    523 
    524     // Here our vertex and instance inputs need to match the same 0-based bindings they were
    525     // assigned in GrVkPipeline. That is, vertex first (if any) followed by instance.
    526     uint32_t binding = 0;
    527 
    528     if (primProc.hasVertexAttribs()) {
    529         SkASSERT(vertexBuffer);
    530         SkASSERT(!vertexBuffer->isCPUBacked());
    531         SkASSERT(!vertexBuffer->isMapped());
    532 
    533         currCmdBuf->bindInputBuffer(fGpu, binding++,
    534                                     static_cast<const GrVkVertexBuffer*>(vertexBuffer));
    535     }
    536 
    537     if (primProc.hasInstanceAttribs()) {
    538         SkASSERT(instanceBuffer);
    539         SkASSERT(!instanceBuffer->isCPUBacked());
    540         SkASSERT(!instanceBuffer->isMapped());
    541 
    542         currCmdBuf->bindInputBuffer(fGpu, binding++,
    543                                     static_cast<const GrVkVertexBuffer*>(instanceBuffer));
    544     }
    545 
    546     if (indexBuffer) {
    547         SkASSERT(indexBuffer);
    548         SkASSERT(!indexBuffer->isMapped());
    549         SkASSERT(!indexBuffer->isCPUBacked());
    550 
    551         currCmdBuf->bindIndexBuffer(fGpu, static_cast<const GrVkIndexBuffer*>(indexBuffer));
    552     }
    553 }
    554 
// Finds (or creates) a pipeline state compatible with the current render pass,
// binds it, and sets the dynamic scissor/viewport/blend-constant state.
// Returns nullptr if no compatible pipeline state could be made.
GrVkPipelineState* GrVkGpuRTCommandBuffer::prepareDrawState(const GrPipeline& pipeline,
                                                            const GrPrimitiveProcessor& primProc,
                                                            GrPrimitiveType primitiveType,
                                                            bool hasDynamicState) {
    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
    SkASSERT(cbInfo.fRenderPass);

    GrVkPipelineState* pipelineState =
        fGpu->resourceProvider().findOrCreateCompatiblePipelineState(pipeline,
                                                                     primProc,
                                                                     primitiveType,
                                                                     *cbInfo.fRenderPass);
    if (!pipelineState) {
        return pipelineState;
    }

    // On some drivers (per vkCaps) changing pipelines within one secondary
    // command buffer is costly/broken, so start a fresh buffer on a change.
    if (!cbInfo.fIsEmpty &&
        fLastPipelineState && fLastPipelineState != pipelineState &&
        fGpu->vkCaps().newCBOnPipelineChange()) {
        this->addAdditionalCommandBuffer();
    }
    fLastPipelineState = pipelineState;

    pipelineState->setData(fGpu, primProc, pipeline);

    pipelineState->bind(fGpu, cbInfo.currentCmdBuf());

    GrRenderTarget* rt = pipeline.renderTarget();

    // When scissoring is off, the dynamic scissor must still be set (to the full
    // target). When per-mesh dynamic state is supplied, the caller sets it later.
    if (!pipeline.getScissorState().enabled()) {
        GrVkPipeline::SetDynamicScissorRectState(fGpu, cbInfo.currentCmdBuf(),
                                                 rt, pipeline.proxy()->origin(),
                                                 SkIRect::MakeWH(rt->width(), rt->height()));
    } else if (!hasDynamicState) {
        GrVkPipeline::SetDynamicScissorRectState(fGpu, cbInfo.currentCmdBuf(),
                                                 rt, pipeline.proxy()->origin(),
                                                 pipeline.getScissorState().rect());
    }
    GrVkPipeline::SetDynamicViewportState(fGpu, cbInfo.currentCmdBuf(), rt);
    GrVkPipeline::SetDynamicBlendConstantState(fGpu, cbInfo.currentCmdBuf(), rt->config(),
                                               pipeline.getXferProcessor());

    return pipelineState;
}
    599 
// Collects every texture the processor samples into 'sampledImages' (so submit()
// can transition their layouts), resolving MSAA and regenerating dirty mipmaps
// first where needed.
static void prepare_sampled_images(const GrResourceIOProcessor& processor,
                                   SkTArray<GrVkImage*>* sampledImages,
                                   GrVkGpu* gpu) {
    for (int i = 0; i < processor.numTextureSamplers(); ++i) {
        const GrResourceIOProcessor::TextureSampler& sampler = processor.textureSampler(i);
        GrVkTexture* vkTexture = static_cast<GrVkTexture*>(sampler.peekTexture());

        // We may need to resolve the texture first if it is also a render target
        GrVkRenderTarget* texRT = static_cast<GrVkRenderTarget*>(vkTexture->asRenderTarget());
        if (texRT) {
            gpu->onResolveRenderTarget(texRT, sampler.proxy()->origin());
        }

        // Check if we need to regenerate any mip maps
        if (GrSamplerState::Filter::kMipMap == sampler.samplerState().filter()) {
            if (vkTexture->texturePriv().mipMapsAreDirty()) {
                gpu->generateMipmap(vkTexture, sampler.proxy()->origin());
                vkTexture->texturePriv().markMipMapsClean();
            }
        }
        sampledImages->push_back(vkTexture);
    }
}
    623 
// Records a draw of |meshCount| meshes into the currently active command
// buffer. Prepares all sampled textures, binds a pipeline state matching the
// first mesh's primitive type (rebuilding it mid-loop if a later mesh uses a
// different primitive type), applies per-mesh dynamic scissor state when
// |dynamicStates| is provided, and finally folds |bounds| into the command
// buffer's dirty bounds.
void GrVkGpuRTCommandBuffer::onDraw(const GrPipeline& pipeline,
                                    const GrPrimitiveProcessor& primProc,
                                    const GrMesh meshes[],
                                    const GrPipeline::DynamicState dynamicStates[],
                                    int meshCount,
                                    const SkRect& bounds) {
    SkASSERT(pipeline.renderTarget() == fRenderTarget);

    // Nothing to record for an empty draw.
    if (!meshCount) {
        return;
    }

    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];

    // Get every texture this draw will sample ready (resolve/mips) and track
    // the images on the command buffer: the primitive processor's samplers,
    // then each fragment processor's, then the dst-read texture if any.
    prepare_sampled_images(primProc, &cbInfo.fSampledImages, fGpu);
    GrFragmentProcessor::Iter iter(pipeline);
    while (const GrFragmentProcessor* fp = iter.next()) {
        prepare_sampled_images(*fp, &cbInfo.fSampledImages, fGpu);
    }
    if (GrTexture* dstTexture = pipeline.peekDstTexture()) {
        cbInfo.fSampledImages.push_back(static_cast<GrVkTexture*>(dstTexture));
    }

    // The Vulkan pipeline bakes in the primitive type, so the pipeline state
    // is keyed off the first mesh's type; a mismatching mesh below forces a
    // new pipeline state.
    GrPrimitiveType primitiveType = meshes[0].primitiveType();
    GrVkPipelineState* pipelineState = this->prepareDrawState(pipeline,
                                                              primProc,
                                                              primitiveType,
                                                              SkToBool(dynamicStates));
    if (!pipelineState) {
        return;
    }

    for (int i = 0; i < meshCount; ++i) {
        const GrMesh& mesh = meshes[i];
        if (mesh.primitiveType() != primitiveType) {
            // Technically we don't have to call this here (since there is a safety check in
            // pipelineState:setData but this will allow for quicker freeing of resources if the
            // pipelineState sits in a cache for a while.
            pipelineState->freeTempResources(fGpu);
            // Null out the freed state in debug builds so accidental reuse
            // before the re-prepare below trips the SkASSERT.
            SkDEBUGCODE(pipelineState = nullptr);
            primitiveType = mesh.primitiveType();
            pipelineState = this->prepareDrawState(pipeline,
                                                   primProc,
                                                   primitiveType,
                                                   SkToBool(dynamicStates));
            if (!pipelineState) {
                return;
            }
        }

        // Per-mesh dynamic state: only the scissor rect varies per mesh, and
        // only when scissoring is enabled on the pipeline.
        if (dynamicStates) {
            if (pipeline.getScissorState().enabled()) {
                GrVkPipeline::SetDynamicScissorRectState(fGpu, cbInfo.currentCmdBuf(),
                                                         fRenderTarget, pipeline.proxy()->origin(),
                                                         dynamicStates[i].fScissorRect);
            }
        }

        SkASSERT(pipelineState);
        // Dispatches back into the sendInstancedMeshToGpu/
        // sendIndexedInstancedMeshToGpu callbacks on this object.
        mesh.sendToGpu(primProc, this);
    }

    // Track the region touched by this draw and mark the buffer non-empty.
    cbInfo.fBounds.join(bounds);
    cbInfo.fIsEmpty = false;

    // Technically we don't have to call this here (since there is a safety check in
    // pipelineState:setData but this will allow for quicker freeing of resources if the
    // pipelineState sits in a cache for a while.
    pipelineState->freeTempResources(fGpu);
}
    694 
    695 void GrVkGpuRTCommandBuffer::sendInstancedMeshToGpu(const GrPrimitiveProcessor& primProc,
    696                                                     GrPrimitiveType,
    697                                                     const GrBuffer* vertexBuffer,
    698                                                     int vertexCount,
    699                                                     int baseVertex,
    700                                                     const GrBuffer* instanceBuffer,
    701                                                     int instanceCount,
    702                                                     int baseInstance) {
    703     CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
    704     this->bindGeometry(primProc, nullptr, vertexBuffer, instanceBuffer);
    705     cbInfo.currentCmdBuf()->draw(fGpu, vertexCount, instanceCount, baseVertex, baseInstance);
    706     fGpu->stats()->incNumDraws();
    707 }
    708 
    709 void GrVkGpuRTCommandBuffer::sendIndexedInstancedMeshToGpu(const GrPrimitiveProcessor& primProc,
    710                                                            GrPrimitiveType,
    711                                                            const GrBuffer* indexBuffer,
    712                                                            int indexCount,
    713                                                            int baseIndex,
    714                                                            const GrBuffer* vertexBuffer,
    715                                                            int baseVertex,
    716                                                            const GrBuffer* instanceBuffer,
    717                                                            int instanceCount,
    718                                                            int baseInstance) {
    719     CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
    720     this->bindGeometry(primProc, indexBuffer, vertexBuffer, instanceBuffer);
    721     cbInfo.currentCmdBuf()->drawIndexed(fGpu, indexCount, instanceCount,
    722                                         baseIndex, baseVertex, baseInstance);
    723     fGpu->stats()->incNumDraws();
    724 }
    725 
    726