/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkResourceProvider.h"

#include "GrContextPriv.h"
#include "GrSamplerState.h"
#include "GrVkCommandBuffer.h"
#include "GrVkCommandPool.h"
#include "GrVkCopyPipeline.h"
#include "GrVkGpu.h"
#include "GrVkPipeline.h"
#include "GrVkRenderTarget.h"
#include "GrVkUniformBuffer.h"
#include "GrVkUtil.h"
#include "SkTaskGroup.h"

#ifdef SK_TRACE_VK_RESOURCES
std::atomic<uint32_t> GrVkResource::fKeyCounter{0};
#endif

GrVkResourceProvider::GrVkResourceProvider(GrVkGpu* gpu)
    : fGpu(gpu)
    , fPipelineCache(VK_NULL_HANDLE) {
    fPipelineStateCache = new PipelineStateCache(gpu);
}

GrVkResourceProvider::~GrVkResourceProvider() {
    SkASSERT(0 == fRenderPassArray.count());
    SkASSERT(0 == fExternalRenderPasses.count());
    SkASSERT(VK_NULL_HANDLE == fPipelineCache);
    delete fPipelineStateCache;
}

VkPipelineCache GrVkResourceProvider::pipelineCache() {
    if (fPipelineCache == VK_NULL_HANDLE) {
        VkPipelineCacheCreateInfo createInfo;
        memset(&createInfo, 0, sizeof(VkPipelineCacheCreateInfo));
        createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
        createInfo.pNext = nullptr;
        createInfo.flags = 0;

        auto persistentCache = fGpu->getContext()->contextPriv().getPersistentCache();
        sk_sp<SkData> cached;
        if (persistentCache) {
            uint32_t key = GrVkGpu::kPipelineCache_PersistentCacheKeyType;
            sk_sp<SkData> keyData = SkData::MakeWithoutCopy(&key, sizeof(uint32_t));
            cached = persistentCache->load(*keyData);
        }
        bool usedCached = false;
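        // Per the Vulkan spec, a pipeline cache blob starts with a header whose 32-bit
        // words are: [0] header length in bytes, [1] header version, [2] vendorID,
        // [3] deviceID, followed by VK_UUID_SIZE bytes of pipelineCacheUUID. The blob is
        // only reusable if all of these match the current physical device.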
        if (cached) {
            uint32_t* cacheHeader = (uint32_t*)cached->data();
            if (cacheHeader[1] == VK_PIPELINE_CACHE_HEADER_VERSION_ONE) {
                // For version one of the header, the total header size is 16 bytes plus
                // VK_UUID_SIZE bytes. See Section 9.6 (Pipeline Cache) in the Vulkan spec
                // for the breakdown of these bytes.
                SkASSERT(cacheHeader[0] == 16 + VK_UUID_SIZE);
                const VkPhysicalDeviceProperties& devProps = fGpu->physicalDeviceProperties();
                const uint8_t* supportedPipelineCacheUUID = devProps.pipelineCacheUUID;
                if (cacheHeader[2] == devProps.vendorID && cacheHeader[3] == devProps.deviceID &&
                    !memcmp(&cacheHeader[4], supportedPipelineCacheUUID, VK_UUID_SIZE)) {
                    createInfo.initialDataSize = cached->size();
                    createInfo.pInitialData = cached->data();
                    usedCached = true;
                }
            }
        }
        if (!usedCached) {
            createInfo.initialDataSize = 0;
            createInfo.pInitialData = nullptr;
        }
        VkResult result = GR_VK_CALL(fGpu->vkInterface(),
                                     CreatePipelineCache(fGpu->device(), &createInfo, nullptr,
                                                         &fPipelineCache));
        SkASSERT(VK_SUCCESS == result);
        if (VK_SUCCESS != result) {
            fPipelineCache = VK_NULL_HANDLE;
        }
    }
    return fPipelineCache;
}

void GrVkResourceProvider::init() {
    // Init uniform descriptor objects
    GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateUniformManager(fGpu);
    fDescriptorSetManagers.emplace_back(dsm);
    SkASSERT(1 == fDescriptorSetManagers.count());
    fUniformDSHandle = GrVkDescriptorSetManager::Handle(0);
}

GrVkPipeline* GrVkResourceProvider::createPipeline(int numColorSamples,
                                                   const GrPrimitiveProcessor& primProc,
                                                   const GrPipeline& pipeline,
                                                   const GrStencilSettings& stencil,
                                                   VkPipelineShaderStageCreateInfo* shaderStageInfo,
                                                   int shaderStageCount,
                                                   GrPrimitiveType primitiveType,
                                                   VkRenderPass compatibleRenderPass,
                                                   VkPipelineLayout layout) {
    return GrVkPipeline::Create(fGpu, numColorSamples, primProc, pipeline, stencil, shaderStageInfo,
                                shaderStageCount, primitiveType, compatibleRenderPass, layout,
                                this->pipelineCache());
}

GrVkCopyPipeline* GrVkResourceProvider::findOrCreateCopyPipeline(
        const GrVkRenderTarget* dst,
        VkPipelineShaderStageCreateInfo* shaderStageInfo,
        VkPipelineLayout pipelineLayout) {
    // Find or create a compatible pipeline.
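    // A linear search is fine here: the list only grows by one pipeline per distinct
    // compatible render pass, so it stays small.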
    GrVkCopyPipeline* pipeline = nullptr;
    for (int i = 0; i < fCopyPipelines.count() && !pipeline; ++i) {
        if (fCopyPipelines[i]->isCompatible(*dst->simpleRenderPass())) {
            pipeline = fCopyPipelines[i];
        }
    }
    if (!pipeline) {
        pipeline = GrVkCopyPipeline::Create(fGpu, shaderStageInfo,
                                            pipelineLayout,
                                            dst->numColorSamples(),
                                            *dst->simpleRenderPass(),
                                            this->pipelineCache());
        if (!pipeline) {
            return nullptr;
        }
        fCopyPipelines.push_back(pipeline);
    }
    SkASSERT(pipeline);
    pipeline->ref();
    return pipeline;
}

// To create framebuffers, we first need to create a simple RenderPass that is
// only used for framebuffer creation. When we actually render, we will create
// RenderPasses as needed that are compatible with the framebuffer.
const GrVkRenderPass*
GrVkResourceProvider::findCompatibleRenderPass(const GrVkRenderTarget& target,
                                               CompatibleRPHandle* compatibleHandle) {
    for (int i = 0; i < fRenderPassArray.count(); ++i) {
        if (fRenderPassArray[i].isCompatible(target)) {
            const GrVkRenderPass* renderPass = fRenderPassArray[i].getCompatibleRenderPass();
            renderPass->ref();
            if (compatibleHandle) {
                *compatibleHandle = CompatibleRPHandle(i);
            }
            return renderPass;
        }
    }

    const GrVkRenderPass* renderPass =
        fRenderPassArray.emplace_back(fGpu, target).getCompatibleRenderPass();
    renderPass->ref();

    if (compatibleHandle) {
        *compatibleHandle = CompatibleRPHandle(fRenderPassArray.count() - 1);
    }
    return renderPass;
}

const GrVkRenderPass*
GrVkResourceProvider::findCompatibleRenderPass(const CompatibleRPHandle& compatibleHandle) {
    SkASSERT(compatibleHandle.isValid() && compatibleHandle.toIndex() < fRenderPassArray.count());
    int index = compatibleHandle.toIndex();
    const GrVkRenderPass* renderPass = fRenderPassArray[index].getCompatibleRenderPass();
    renderPass->ref();
    return renderPass;
}

const GrVkRenderPass* GrVkResourceProvider::findCompatibleExternalRenderPass(
        VkRenderPass renderPass, uint32_t colorAttachmentIndex) {
    for (int i = 0; i < fExternalRenderPasses.count(); ++i) {
        if (fExternalRenderPasses[i]->isCompatibleExternalRP(renderPass)) {
            fExternalRenderPasses[i]->ref();
#ifdef SK_DEBUG
            uint32_t cachedColorIndex;
            SkASSERT(fExternalRenderPasses[i]->colorAttachmentIndex(&cachedColorIndex));
            SkASSERT(cachedColorIndex == colorAttachmentIndex);
#endif
            return fExternalRenderPasses[i];
        }
    }

    const GrVkRenderPass* newRenderPass = new GrVkRenderPass(renderPass, colorAttachmentIndex);
    fExternalRenderPasses.push_back(newRenderPass);
    newRenderPass->ref();
    return newRenderPass;
}

const GrVkRenderPass* GrVkResourceProvider::findRenderPass(
                                                     const GrVkRenderTarget& target,
                                                     const GrVkRenderPass::LoadStoreOps& colorOps,
                                                     const GrVkRenderPass::LoadStoreOps& stencilOps,
                                                     CompatibleRPHandle* compatibleHandle) {
    GrVkResourceProvider::CompatibleRPHandle tempRPHandle;
    GrVkResourceProvider::CompatibleRPHandle* pRPHandle = compatibleHandle ? compatibleHandle
                                                                           : &tempRPHandle;
    *pRPHandle = target.compatibleRenderPassHandle();

    // This will get us the handle to (and possibly create) the compatible set for the specific
    // GrVkRenderPass we are looking for.
    this->findCompatibleRenderPass(target, compatibleHandle);
    return this->findRenderPass(*pRPHandle, colorOps, stencilOps);
}

const GrVkRenderPass*
GrVkResourceProvider::findRenderPass(const CompatibleRPHandle& compatibleHandle,
                                     const GrVkRenderPass::LoadStoreOps& colorOps,
                                     const GrVkRenderPass::LoadStoreOps& stencilOps) {
    SkASSERT(compatibleHandle.isValid() && compatibleHandle.toIndex() < fRenderPassArray.count());
    CompatibleRenderPassSet& compatibleSet = fRenderPassArray[compatibleHandle.toIndex()];
    const GrVkRenderPass* renderPass = compatibleSet.getRenderPass(fGpu,
                                                                   colorOps,
                                                                   stencilOps);
    renderPass->ref();
    return renderPass;
}

GrVkDescriptorPool* GrVkResourceProvider::findOrCreateCompatibleDescriptorPool(
                                                            VkDescriptorType type, uint32_t count) {
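    // Despite the name, this currently always creates a new descriptor pool rather than
    // reusing an existing one.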
    return new GrVkDescriptorPool(fGpu, type, count);
}

GrVkSampler* GrVkResourceProvider::findOrCreateCompatibleSampler(
        const GrSamplerState& params, const GrVkYcbcrConversionInfo& ycbcrInfo) {
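    // Samplers are keyed on both the sampler state and the Ycbcr conversion info, so
    // images that need different conversions get distinct VkSamplers.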
    GrVkSampler* sampler = fSamplers.find(GrVkSampler::GenerateKey(params, ycbcrInfo));
    if (!sampler) {
        sampler = GrVkSampler::Create(fGpu, params, ycbcrInfo);
        if (!sampler) {
            return nullptr;
        }
        fSamplers.add(sampler);
    }
    SkASSERT(sampler);
    sampler->ref();
    return sampler;
}

GrVkSamplerYcbcrConversion* GrVkResourceProvider::findOrCreateCompatibleSamplerYcbcrConversion(
        const GrVkYcbcrConversionInfo& ycbcrInfo) {
    GrVkSamplerYcbcrConversion* ycbcrConversion =
            fYcbcrConversions.find(GrVkSamplerYcbcrConversion::GenerateKey(ycbcrInfo));
    if (!ycbcrConversion) {
        ycbcrConversion = GrVkSamplerYcbcrConversion::Create(fGpu, ycbcrInfo);
        if (!ycbcrConversion) {
            return nullptr;
        }
        fYcbcrConversions.add(ycbcrConversion);
    }
    SkASSERT(ycbcrConversion);
    ycbcrConversion->ref();
    return ycbcrConversion;
}

GrVkPipelineState* GrVkResourceProvider::findOrCreateCompatiblePipelineState(
        GrRenderTarget* renderTarget, GrSurfaceOrigin origin,
        const GrPipeline& pipeline, const GrPrimitiveProcessor& proc,
        const GrTextureProxy* const primProcProxies[], GrPrimitiveType primitiveType,
        VkRenderPass compatibleRenderPass) {
    return fPipelineStateCache->refPipelineState(renderTarget, origin, proc, primProcProxies,
                                                 pipeline, primitiveType, compatibleRenderPass);
}

void GrVkResourceProvider::getSamplerDescriptorSetHandle(VkDescriptorType type,
                                                         const GrVkUniformHandler& uniformHandler,
                                                         GrVkDescriptorSetManager::Handle* handle) {
    SkASSERT(handle);
    SkASSERT(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
             VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type);
    for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
        if (fDescriptorSetManagers[i]->isCompatible(type, &uniformHandler)) {
            *handle = GrVkDescriptorSetManager::Handle(i);
            return;
        }
    }

    GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateSamplerManager(fGpu, type,
                                                                                   uniformHandler);
    fDescriptorSetManagers.emplace_back(dsm);
    *handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.count() - 1);
}

void GrVkResourceProvider::getSamplerDescriptorSetHandle(VkDescriptorType type,
                                                         const SkTArray<uint32_t>& visibilities,
                                                         GrVkDescriptorSetManager::Handle* handle) {
    SkASSERT(handle);
    SkASSERT(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
             VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type);
    for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
        if (fDescriptorSetManagers[i]->isCompatible(type, visibilities)) {
            *handle = GrVkDescriptorSetManager::Handle(i);
            return;
        }
    }

    GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateSamplerManager(fGpu, type,
                                                                                   visibilities);
    fDescriptorSetManagers.emplace_back(dsm);
    *handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.count() - 1);
}

VkDescriptorSetLayout GrVkResourceProvider::getUniformDSLayout() const {
    SkASSERT(fUniformDSHandle.isValid());
    return fDescriptorSetManagers[fUniformDSHandle.toIndex()]->layout();
}

VkDescriptorSetLayout GrVkResourceProvider::getSamplerDSLayout(
        const GrVkDescriptorSetManager::Handle& handle) const {
    SkASSERT(handle.isValid());
    return fDescriptorSetManagers[handle.toIndex()]->layout();
}

const GrVkDescriptorSet* GrVkResourceProvider::getUniformDescriptorSet() {
    SkASSERT(fUniformDSHandle.isValid());
    return fDescriptorSetManagers[fUniformDSHandle.toIndex()]->getDescriptorSet(fGpu,
                                                                                fUniformDSHandle);
}

const GrVkDescriptorSet* GrVkResourceProvider::getSamplerDescriptorSet(
        const GrVkDescriptorSetManager::Handle& handle) {
    SkASSERT(handle.isValid());
    return fDescriptorSetManagers[handle.toIndex()]->getDescriptorSet(fGpu, handle);
}

void GrVkResourceProvider::recycleDescriptorSet(const GrVkDescriptorSet* descSet,
                                                const GrVkDescriptorSetManager::Handle& handle) {
    SkASSERT(descSet);
    SkASSERT(handle.isValid());
    int managerIdx = handle.toIndex();
    SkASSERT(managerIdx < fDescriptorSetManagers.count());
    fDescriptorSetManagers[managerIdx]->recycleDescriptorSet(descSet);
}

GrVkCommandPool* GrVkResourceProvider::findOrCreateCommandPool() {
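    // fAvailableCommandPools is also modified by reset(), which can run on a task group
    // thread, so every access must hold fBackgroundMutex.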
    std::unique_lock<std::recursive_mutex> lock(fBackgroundMutex);
    GrVkCommandPool* result;
    if (fAvailableCommandPools.count()) {
        result = fAvailableCommandPools.back();
        fAvailableCommandPools.pop_back();
    } else {
        result = GrVkCommandPool::Create(fGpu);
    }
    SkASSERT(result->unique());
    SkDEBUGCODE(
        for (const GrVkCommandPool* pool : fActiveCommandPools) {
            SkASSERT(pool != result);
        }
        for (const GrVkCommandPool* pool : fAvailableCommandPools) {
            SkASSERT(pool != result);
        }
    )
    fActiveCommandPools.push_back(result);
    result->ref();
    return result;
}

void GrVkResourceProvider::checkCommandBuffers() {
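    // Walk the list in reverse: removeShuffle() swaps the removed entry with the last one,
    // which a forward walk would then skip.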
    for (int i = fActiveCommandPools.count() - 1; i >= 0; --i) {
        GrVkCommandPool* pool = fActiveCommandPools[i];
        if (!pool->isOpen()) {
            GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
            if (buffer->finished(fGpu)) {
                fActiveCommandPools.removeShuffle(i);
                this->backgroundReset(pool);
            }
        }
    }
}

const GrVkResource* GrVkResourceProvider::findOrCreateStandardUniformBufferResource() {
    const GrVkResource* resource = nullptr;
    int count = fAvailableUniformBufferResources.count();
    if (count > 0) {
        resource = fAvailableUniformBufferResources[count - 1];
        fAvailableUniformBufferResources.removeShuffle(count - 1);
    } else {
        resource = GrVkUniformBuffer::CreateResource(fGpu, GrVkUniformBuffer::kStandardSize);
    }
    return resource;
}

void GrVkResourceProvider::recycleStandardUniformBufferResource(const GrVkResource* resource) {
    fAvailableUniformBufferResources.push_back(resource);
}

void GrVkResourceProvider::destroyResources(bool deviceLost) {
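    // Wait for any command pool resets still running on task group threads before we start
    // tearing resources down.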
    SkTaskGroup* taskGroup = fGpu->getContext()->contextPriv().getTaskGroup();
    if (taskGroup) {
        taskGroup->wait();
    }

    // Release all copy pipelines
    for (int i = 0; i < fCopyPipelines.count(); ++i) {
        fCopyPipelines[i]->unref(fGpu);
    }

    // Loop over all render pass sets to make sure we destroy all the internal VkRenderPasses.
    for (int i = 0; i < fRenderPassArray.count(); ++i) {
        fRenderPassArray[i].releaseResources(fGpu);
    }
    fRenderPassArray.reset();

    for (int i = 0; i < fExternalRenderPasses.count(); ++i) {
        fExternalRenderPasses[i]->unref(fGpu);
    }
    fExternalRenderPasses.reset();

    // Iterate through all stored GrVkSamplers and unref them before resetting the hash.
    SkTDynamicHash<GrVkSampler, GrVkSampler::Key>::Iter iter(&fSamplers);
    for (; !iter.done(); ++iter) {
        (*iter).unref(fGpu);
    }
    fSamplers.reset();

    fPipelineStateCache->release();

    GR_VK_CALL(fGpu->vkInterface(), DestroyPipelineCache(fGpu->device(), fPipelineCache, nullptr));
    fPipelineCache = VK_NULL_HANDLE;

    for (GrVkCommandPool* pool : fActiveCommandPools) {
        SkASSERT(pool->unique());
        pool->unref(fGpu);
    }
    fActiveCommandPools.reset();

    for (GrVkCommandPool* pool : fAvailableCommandPools) {
        SkASSERT(pool->unique());
        pool->unref(fGpu);
    }
    fAvailableCommandPools.reset();

    // We must release/destroy all command buffers and pipeline states before releasing the
    // GrVkDescriptorSetManagers
    for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
        fDescriptorSetManagers[i]->release(fGpu);
    }
    fDescriptorSetManagers.reset();

    // Release our uniform buffers.
    for (int i = 0; i < fAvailableUniformBufferResources.count(); ++i) {
        SkASSERT(fAvailableUniformBufferResources[i]->unique());
        fAvailableUniformBufferResources[i]->unref(fGpu);
    }
    fAvailableUniformBufferResources.reset();
}

void GrVkResourceProvider::abandonResources() {
    SkTaskGroup* taskGroup = fGpu->getContext()->contextPriv().getTaskGroup();
    if (taskGroup) {
        taskGroup->wait();
    }

    // Abandon all command pools
    for (int i = 0; i < fActiveCommandPools.count(); ++i) {
        SkASSERT(fActiveCommandPools[i]->unique());
        fActiveCommandPools[i]->unrefAndAbandon();
    }
    fActiveCommandPools.reset();
    for (int i = 0; i < fAvailableCommandPools.count(); ++i) {
        SkASSERT(fAvailableCommandPools[i]->unique());
        fAvailableCommandPools[i]->unrefAndAbandon();
    }
    fAvailableCommandPools.reset();

    // Abandon all copy pipelines
    for (int i = 0; i < fCopyPipelines.count(); ++i) {
        fCopyPipelines[i]->unrefAndAbandon();
    }

    // Loop over all render pass sets to make sure we abandon all the internal VkRenderPasses.
    for (int i = 0; i < fRenderPassArray.count(); ++i) {
        fRenderPassArray[i].abandonResources();
    }
    fRenderPassArray.reset();

    for (int i = 0; i < fExternalRenderPasses.count(); ++i) {
        fExternalRenderPasses[i]->unrefAndAbandon();
    }
    fExternalRenderPasses.reset();

    // Iterate through all stored GrVkSamplers and unrefAndAbandon them before resetting the hash.
    SkTDynamicHash<GrVkSampler, GrVkSampler::Key>::Iter iter(&fSamplers);
    for (; !iter.done(); ++iter) {
        (*iter).unrefAndAbandon();
    }
    fSamplers.reset();

    fPipelineStateCache->abandon();

    fPipelineCache = VK_NULL_HANDLE;

    // We must abandon all command buffers and pipeline states before abandoning the
    // GrVkDescriptorSetManagers
    for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
        fDescriptorSetManagers[i]->abandon();
    }
    fDescriptorSetManagers.reset();

    // Abandon our uniform buffers.
    for (int i = 0; i < fAvailableUniformBufferResources.count(); ++i) {
        SkASSERT(fAvailableUniformBufferResources[i]->unique());
        fAvailableUniformBufferResources[i]->unrefAndAbandon();
    }
    fAvailableUniformBufferResources.reset();
}

void GrVkResourceProvider::backgroundReset(GrVkCommandPool* pool) {
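    // Release the pool's resources on this thread, then perform the actual pool reset on a
    // task group thread when one is available; otherwise reset synchronously.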
    SkASSERT(pool->unique());
    pool->releaseResources(fGpu);
    SkTaskGroup* taskGroup = fGpu->getContext()->contextPriv().getTaskGroup();
    if (taskGroup) {
        taskGroup->add([this, pool]() {
            this->reset(pool);
        });
    } else {
        this->reset(pool);
    }
}

void GrVkResourceProvider::reset(GrVkCommandPool* pool) {
    SkASSERT(pool->unique());
    pool->reset(fGpu);
    std::unique_lock<std::recursive_mutex> providerLock(fBackgroundMutex);
    fAvailableCommandPools.push_back(pool);
}

void GrVkResourceProvider::storePipelineCacheData() {
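    // vkGetPipelineCacheData uses the standard two-call idiom: query the required size
    // first, then fetch the data into a buffer of that size.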
    size_t dataSize = 0;
    VkResult result = GR_VK_CALL(fGpu->vkInterface(), GetPipelineCacheData(fGpu->device(),
                                                                           this->pipelineCache(),
                                                                           &dataSize, nullptr));
    SkASSERT(result == VK_SUCCESS);

    std::unique_ptr<uint8_t[]> data(new uint8_t[dataSize]);

    result = GR_VK_CALL(fGpu->vkInterface(), GetPipelineCacheData(fGpu->device(),
                                                                  this->pipelineCache(),
                                                                  &dataSize,
                                                                  (void*)data.get()));
    SkASSERT(result == VK_SUCCESS);

    uint32_t key = GrVkGpu::kPipelineCache_PersistentCacheKeyType;
    sk_sp<SkData> keyData = SkData::MakeWithoutCopy(&key, sizeof(uint32_t));

    fGpu->getContext()->contextPriv().getPersistentCache()->store(
            *keyData, *SkData::MakeWithoutCopy(data.get(), dataSize));
}

////////////////////////////////////////////////////////////////////////////////

GrVkResourceProvider::CompatibleRenderPassSet::CompatibleRenderPassSet(
                                                                     const GrVkGpu* gpu,
                                                                     const GrVkRenderTarget& target)
    : fLastReturnedIndex(0) {
    fRenderPasses.emplace_back(new GrVkRenderPass());
    fRenderPasses[0]->initSimple(gpu, target);
}

bool GrVkResourceProvider::CompatibleRenderPassSet::isCompatible(
                                                             const GrVkRenderTarget& target) const {
    // The first GrVkRenderPass should always exist since we create the basic load/store
    // render pass at creation.
    SkASSERT(fRenderPasses[0]);
    return fRenderPasses[0]->isCompatible(target);
}

GrVkRenderPass* GrVkResourceProvider::CompatibleRenderPassSet::getRenderPass(
                                                   const GrVkGpu* gpu,
                                                   const GrVkRenderPass::LoadStoreOps& colorOps,
                                                   const GrVkRenderPass::LoadStoreOps& stencilOps) {
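    // Start the search at the most recently returned render pass, since consecutive
    // lookups tend to ask for the same load/store ops.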
    for (int i = 0; i < fRenderPasses.count(); ++i) {
        int idx = (i + fLastReturnedIndex) % fRenderPasses.count();
        if (fRenderPasses[idx]->equalLoadStoreOps(colorOps, stencilOps)) {
            fLastReturnedIndex = idx;
            return fRenderPasses[idx];
        }
    }
    GrVkRenderPass* renderPass = fRenderPasses.emplace_back(new GrVkRenderPass());
    renderPass->init(gpu, *this->getCompatibleRenderPass(), colorOps, stencilOps);
    fLastReturnedIndex = fRenderPasses.count() - 1;
    return renderPass;
}

void GrVkResourceProvider::CompatibleRenderPassSet::releaseResources(GrVkGpu* gpu) {
    for (int i = 0; i < fRenderPasses.count(); ++i) {
        if (fRenderPasses[i]) {
            fRenderPasses[i]->unref(gpu);
            fRenderPasses[i] = nullptr;
        }
    }
}

void GrVkResourceProvider::CompatibleRenderPassSet::abandonResources() {
    for (int i = 0; i < fRenderPasses.count(); ++i) {
        if (fRenderPasses[i]) {
            fRenderPasses[i]->unrefAndAbandon();
            fRenderPasses[i] = nullptr;
        }
    }
}
    603