/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrResourceProvider.h"

#include "GrBackendSemaphore.h"
#include "GrBuffer.h"
#include "GrCaps.h"
#include "GrContext.h"
#include "GrContextPriv.h"
#include "GrGpu.h"
#include "GrPath.h"
#include "GrPathRendering.h"
#include "GrProxyProvider.h"
#include "GrRenderTargetPriv.h"
#include "GrResourceCache.h"
#include "GrResourceKey.h"
#include "GrSemaphore.h"
#include "GrStencilAttachment.h"
#include "GrTexturePriv.h"
#include "../private/GrSingleOwner.h"
#include "SkGr.h"
#include "SkMathPriv.h"

GR_DECLARE_STATIC_UNIQUE_KEY(gQuadIndexBufferKey);

const uint32_t GrResourceProvider::kMinScratchTextureSize = 16;

#ifdef SK_DISABLE_EXPLICIT_GPU_RESOURCE_ALLOCATION
static const bool kDefaultExplicitlyAllocateGPUResources = false;
#else
static const bool kDefaultExplicitlyAllocateGPUResources = true;
#endif

#define ASSERT_SINGLE_OWNER \
    SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(fSingleOwner);)

GrResourceProvider::GrResourceProvider(GrGpu* gpu, GrResourceCache* cache, GrSingleOwner* owner,
                                       GrContextOptions::Enable explicitlyAllocateGPUResources)
        : fCache(cache)
        , fGpu(gpu)
#ifdef SK_DEBUG
        , fSingleOwner(owner)
#endif
{
    if (GrContextOptions::Enable::kNo == explicitlyAllocateGPUResources) {
        fExplicitlyAllocateGPUResources = false;
    } else if (GrContextOptions::Enable::kYes == explicitlyAllocateGPUResources) {
        fExplicitlyAllocateGPUResources = true;
    } else {
        fExplicitlyAllocateGPUResources = kDefaultExplicitlyAllocateGPUResources;
    }

    fCaps = sk_ref_sp(fGpu->caps());

    GR_DEFINE_STATIC_UNIQUE_KEY(gQuadIndexBufferKey);
    fQuadIndexBufferKey = gQuadIndexBufferKey;
}

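// Creates a texture initialized with the caller-supplied MIP level data. The desc is validated
// against the caps (including MIP support when more than one level is supplied) before the
// request is forwarded to the GrGpu backend.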
sk_sp<GrTexture> GrResourceProvider::createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
                                                   const GrMipLevel texels[], int mipLevelCount) {
    ASSERT_SINGLE_OWNER

    SkASSERT(mipLevelCount > 0);

    if (this->isAbandoned()) {
        return nullptr;
    }

    GrMipMapped mipMapped = mipLevelCount > 1 ? GrMipMapped::kYes : GrMipMapped::kNo;
    if (!fCaps->validateSurfaceDesc(desc, mipMapped)) {
        return nullptr;
    }

    return fGpu->createTexture(desc, budgeted, texels, mipLevelCount);
}

sk_sp<GrTexture> GrResourceProvider::getExactScratch(const GrSurfaceDesc& desc,
                                                     SkBudgeted budgeted, Flags flags) {
    sk_sp<GrTexture> tex(this->refScratchTexture(desc, flags));
    if (tex && SkBudgeted::kNo == budgeted) {
        tex->resourcePriv().makeUnbudgeted();
    }

    return tex;
}

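// Creates a texture from a single level of pixel data. When the pixel config maps to an
// SkColorType, the data is uploaded through a wrapped proxy/surface context via writePixels(),
// which allows the texture to come from the scratch pools (exact or approximate fit). Otherwise
// it falls back to a direct upload through the GrGpu backend.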
sk_sp<GrTexture> GrResourceProvider::createTexture(const GrSurfaceDesc& desc,
                                                   SkBudgeted budgeted,
                                                   SkBackingFit fit,
                                                   const GrMipLevel& mipLevel,
                                                   Flags flags) {
    ASSERT_SINGLE_OWNER

    if (this->isAbandoned()) {
        return nullptr;
    }

    if (!mipLevel.fPixels) {
        return nullptr;
    }

    if (!fCaps->validateSurfaceDesc(desc, GrMipMapped::kNo)) {
        return nullptr;
    }

    GrContext* context = fGpu->getContext();
    GrProxyProvider* proxyProvider = context->contextPriv().proxyProvider();

    SkColorType colorType;
    if (GrPixelConfigToColorType(desc.fConfig, &colorType)) {
        sk_sp<GrTexture> tex = (SkBackingFit::kApprox == fit)
                ? this->createApproxTexture(desc, flags)
                : this->createTexture(desc, budgeted, flags);
        if (!tex) {
            return nullptr;
        }

        sk_sp<GrTextureProxy> proxy = proxyProvider->createWrapped(std::move(tex),
                                                                   kTopLeft_GrSurfaceOrigin);
        if (!proxy) {
            return nullptr;
        }
        auto srcInfo = SkImageInfo::Make(desc.fWidth, desc.fHeight, colorType,
                                         kUnknown_SkAlphaType);
        sk_sp<GrSurfaceContext> sContext = context->contextPriv().makeWrappedSurfaceContext(
                std::move(proxy));
        if (!sContext) {
            return nullptr;
        }
        SkAssertResult(sContext->writePixels(srcInfo, mipLevel.fPixels, mipLevel.fRowBytes, 0, 0));
        return sk_ref_sp(sContext->asTextureProxy()->peekTexture());
    } else {
        return fGpu->createTexture(desc, budgeted, &mipLevel, 1);
    }
}

sk_sp<GrTexture> GrResourceProvider::createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
                                                   Flags flags) {
    ASSERT_SINGLE_OWNER
    if (this->isAbandoned()) {
        return nullptr;
    }

    if (!fCaps->validateSurfaceDesc(desc, GrMipMapped::kNo)) {
        return nullptr;
    }

    // Compressed textures are read-only so they don't support re-use for scratch.
    if (!GrPixelConfigIsCompressed(desc.fConfig)) {
        sk_sp<GrTexture> tex = this->getExactScratch(desc, budgeted, flags);
        if (tex) {
            return tex;
        }
    }

    return fGpu->createTexture(desc, budgeted);
}

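// Approximate-fit variant: the returned texture may be larger than requested so that scratch
// textures can be reused across differently sized requests. When binning applies, each dimension
// is rounded up to the next power of two with a floor of kMinScratchTextureSize (16), e.g. a
// 100x60 request may be satisfied by a 128x64 scratch texture.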
sk_sp<GrTexture> GrResourceProvider::createApproxTexture(const GrSurfaceDesc& desc,
                                                         Flags flags) {
    ASSERT_SINGLE_OWNER
    SkASSERT(Flags::kNone == flags || Flags::kNoPendingIO == flags);

    if (this->isAbandoned()) {
        return nullptr;
    }

    // Currently we don't recycle compressed textures as scratch.
    if (GrPixelConfigIsCompressed(desc.fConfig)) {
        return nullptr;
    }

    if (!fCaps->validateSurfaceDesc(desc, GrMipMapped::kNo)) {
        return nullptr;
    }

    if (auto tex = this->refScratchTexture(desc, flags)) {
        return tex;
    }

    SkTCopyOnFirstWrite<GrSurfaceDesc> copyDesc(desc);

    // bin by pow2 with a reasonable min
    if (!SkToBool(desc.fFlags & kPerformInitialClear_GrSurfaceFlag) &&
        (fGpu->caps()->reuseScratchTextures() || (desc.fFlags & kRenderTarget_GrSurfaceFlag))) {
        GrSurfaceDesc* wdesc = copyDesc.writable();
        wdesc->fWidth  = SkTMax(kMinScratchTextureSize, GrNextPow2(desc.fWidth));
        wdesc->fHeight = SkTMax(kMinScratchTextureSize, GrNextPow2(desc.fHeight));
    }

    if (auto tex = this->refScratchTexture(*copyDesc, flags)) {
        return tex;
    }

    return fGpu->createTexture(*copyDesc, SkBudgeted::kYes);
}

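// Looks for a cached scratch texture compatible with the desc, keyed by
// GrTexturePriv::ComputeScratchKey. The lookup is skipped for textures that request an initial
// clear, and for non-render-targets when the caps report that scratch texture reuse is not
// beneficial; in those cases a new texture is created instead.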
sk_sp<GrTexture> GrResourceProvider::refScratchTexture(const GrSurfaceDesc& desc, Flags flags) {
    ASSERT_SINGLE_OWNER
    SkASSERT(!this->isAbandoned());
    SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig));
    SkASSERT(fCaps->validateSurfaceDesc(desc, GrMipMapped::kNo));

    // We could make initial clears work with scratch textures but it is a rare case so we just opt
    // to fall back to making a new texture.
    if (!SkToBool(desc.fFlags & kPerformInitialClear_GrSurfaceFlag) &&
        (fGpu->caps()->reuseScratchTextures() || (desc.fFlags & kRenderTarget_GrSurfaceFlag))) {

        GrScratchKey key;
        GrTexturePriv::ComputeScratchKey(desc, &key);
        auto scratchFlags = GrResourceCache::ScratchFlags::kNone;
        if (Flags::kNoPendingIO & flags) {
            scratchFlags |= GrResourceCache::ScratchFlags::kRequireNoPendingIO;
        } else if (!(desc.fFlags & kRenderTarget_GrSurfaceFlag)) {
            // If it is not a render target then it will most likely be populated by
            // writePixels() which will trigger a flush if the texture has pending IO.
            scratchFlags |= GrResourceCache::ScratchFlags::kPreferNoPendingIO;
        }
        GrGpuResource* resource = fCache->findAndRefScratchResource(key,
                                                                    GrSurface::WorstCaseSize(desc),
                                                                    scratchFlags);
        if (resource) {
            GrSurface* surface = static_cast<GrSurface*>(resource);
            return sk_sp<GrTexture>(surface->asTexture());
        }
    }

    return nullptr;
}

sk_sp<GrTexture> GrResourceProvider::wrapBackendTexture(const GrBackendTexture& tex,
                                                        GrWrapOwnership ownership,
                                                        GrWrapCacheable cacheable,
                                                        GrIOType ioType) {
    ASSERT_SINGLE_OWNER
    if (this->isAbandoned()) {
        return nullptr;
    }
    return fGpu->wrapBackendTexture(tex, ownership, cacheable, ioType);
}

sk_sp<GrTexture> GrResourceProvider::wrapRenderableBackendTexture(const GrBackendTexture& tex,
                                                                  int sampleCnt,
                                                                  GrWrapOwnership ownership,
                                                                  GrWrapCacheable cacheable) {
    ASSERT_SINGLE_OWNER
    if (this->isAbandoned()) {
        return nullptr;
    }
    return fGpu->wrapRenderableBackendTexture(tex, sampleCnt, ownership, cacheable);
}

sk_sp<GrRenderTarget> GrResourceProvider::wrapBackendRenderTarget(
        const GrBackendRenderTarget& backendRT)
{
    ASSERT_SINGLE_OWNER
    return this->isAbandoned() ? nullptr : fGpu->wrapBackendRenderTarget(backendRT);
}

sk_sp<GrRenderTarget> GrResourceProvider::wrapVulkanSecondaryCBAsRenderTarget(
        const SkImageInfo& imageInfo, const GrVkDrawableInfo& vkInfo) {
    ASSERT_SINGLE_OWNER
    return this->isAbandoned() ? nullptr : fGpu->wrapVulkanSecondaryCBAsRenderTarget(imageInfo,
                                                                                     vkInfo);
}

void GrResourceProvider::assignUniqueKeyToResource(const GrUniqueKey& key,
                                                   GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    if (this->isAbandoned() || !resource) {
        return;
    }
    resource->resourcePriv().setUniqueKey(key);
}

sk_sp<GrGpuResource> GrResourceProvider::findResourceByUniqueKey(const GrUniqueKey& key) {
    ASSERT_SINGLE_OWNER
    return this->isAbandoned() ? nullptr
                               : sk_sp<GrGpuResource>(fCache->findAndRefUniqueResource(key));
}

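// Returns the static buffer registered under 'key' if one exists; otherwise creates a static
// buffer with the supplied contents, assigns it the key, and returns it.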
sk_sp<const GrBuffer> GrResourceProvider::findOrMakeStaticBuffer(GrBufferType intendedType,
                                                                 size_t size,
                                                                 const void* data,
                                                                 const GrUniqueKey& key) {
    if (auto buffer = this->findByUniqueKey<GrBuffer>(key)) {
        return std::move(buffer);
    }
    if (auto buffer = this->createBuffer(size, intendedType, kStatic_GrAccessPattern, Flags::kNone,
                                         data)) {
        // We shouldn't bin and/or cache static buffers.
        SkASSERT(buffer->sizeInBytes() == size);
        SkASSERT(!buffer->resourcePriv().getScratchKey().isValid());
        SkASSERT(!buffer->resourcePriv().hasPendingIO_debugOnly());
        buffer->resourcePriv().setUniqueKey(key);
        return sk_sp<const GrBuffer>(buffer);
    }
    return nullptr;
}

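// Builds a static index buffer by stamping out 'reps' copies of 'pattern', offsetting the
// indices of copy i by i * vertCount. For example, with the quad pattern {0, 1, 2, 2, 1, 3} and
// vertCount 4, copy i writes {4i, 4i+1, 4i+2, 4i+2, 4i+1, 4i+3}. If the buffer cannot be mapped,
// the indices are generated in a temporary array and uploaded with updateData().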
sk_sp<const GrBuffer> GrResourceProvider::createPatternedIndexBuffer(const uint16_t* pattern,
                                                                     int patternSize,
                                                                     int reps,
                                                                     int vertCount,
                                                                     const GrUniqueKey& key) {
    size_t bufferSize = patternSize * reps * sizeof(uint16_t);

    // This is typically used in GrMeshDrawOps, so we assume kNoPendingIO.
    sk_sp<GrBuffer> buffer(this->createBuffer(bufferSize, kIndex_GrBufferType,
                                              kStatic_GrAccessPattern, Flags::kNone));
    if (!buffer) {
        return nullptr;
    }
    uint16_t* data = (uint16_t*) buffer->map();
    SkAutoTArray<uint16_t> temp;
    if (!data) {
        temp.reset(reps * patternSize);
        data = temp.get();
    }
    for (int i = 0; i < reps; ++i) {
        int baseIdx = i * patternSize;
        uint16_t baseVert = (uint16_t)(i * vertCount);
        for (int j = 0; j < patternSize; ++j) {
            data[baseIdx+j] = baseVert + pattern[j];
        }
    }
    if (temp.get()) {
        if (!buffer->updateData(data, bufferSize)) {
            return nullptr;
        }
    } else {
        buffer->unmap();
    }
    this->assignUniqueKeyToResource(key, buffer.get());
    return std::move(buffer);
}

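// Size of the shared quad index buffer. With 4 vertices per quad, kMaxQuads = 1 << 12 keeps the
// largest index (4 * kMaxQuads - 1 = 16383) comfortably within the 16-bit index range, matching
// the GR_STATIC_ASSERT below.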
static constexpr int kMaxQuads = 1 << 12;  // max possible: (1 << 14) - 1;

sk_sp<const GrBuffer> GrResourceProvider::createQuadIndexBuffer() {
    GR_STATIC_ASSERT(4 * kMaxQuads <= 65535);
    static const uint16_t kPattern[] = { 0, 1, 2, 2, 1, 3 };
    return this->createPatternedIndexBuffer(kPattern, 6, kMaxQuads, 4, fQuadIndexBufferKey);
}

int GrResourceProvider::QuadCountOfQuadBuffer() { return kMaxQuads; }

sk_sp<GrPath> GrResourceProvider::createPath(const SkPath& path, const GrStyle& style) {
    if (this->isAbandoned()) {
        return nullptr;
    }

    SkASSERT(this->gpu()->pathRendering());
    return this->gpu()->pathRendering()->createPath(path, style);
}

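// Creates a GrBuffer. Non-dynamic buffers go straight to the GrGpu backend. Dynamic vertex and
// index buffers may instead be CPU-backed when the caps prefer client-side buffers and
// Flags::kRequireGpuMemory is not set. Remaining dynamic requests are binned by rounding the
// size up to the next power of two (minimum 4 KB) and are first sought in the scratch cache,
// e.g. a 5000-byte request allocates 8192 bytes.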
sk_sp<GrBuffer> GrResourceProvider::createBuffer(size_t size, GrBufferType intendedType,
                                                 GrAccessPattern accessPattern, Flags flags,
                                                 const void* data) {
    if (this->isAbandoned()) {
        return nullptr;
    }
    if (kDynamic_GrAccessPattern != accessPattern) {
        return this->gpu()->createBuffer(size, intendedType, accessPattern, data);
    }
    if (!(flags & Flags::kRequireGpuMemory) &&
        this->gpu()->caps()->preferClientSideDynamicBuffers() &&
        GrBufferTypeIsVertexOrIndex(intendedType) &&
        kDynamic_GrAccessPattern == accessPattern) {
        return GrBuffer::MakeCPUBacked(this->gpu(), size, intendedType, data);
    }

    // bin by pow2 with a reasonable min
    static const size_t MIN_SIZE = 1 << 12;
    size_t allocSize = SkTMax(MIN_SIZE, GrNextSizePow2(size));

    GrScratchKey key;
    GrBuffer::ComputeScratchKeyForDynamicVBO(allocSize, intendedType, &key);
    auto scratchFlags = GrResourceCache::ScratchFlags::kNone;
    if (flags & Flags::kNoPendingIO) {
        scratchFlags = GrResourceCache::ScratchFlags::kRequireNoPendingIO;
    } else {
        scratchFlags = GrResourceCache::ScratchFlags::kPreferNoPendingIO;
    }
    auto buffer = sk_sp<GrBuffer>(static_cast<GrBuffer*>(
            this->cache()->findAndRefScratchResource(key, allocSize, scratchFlags)));
    if (!buffer) {
        buffer = this->gpu()->createBuffer(allocSize, intendedType, kDynamic_GrAccessPattern);
        if (!buffer) {
            return nullptr;
        }
    }
    if (data) {
        buffer->updateData(data, size);
    }
    SkASSERT(!buffer->isCPUBacked()); // We should only cache real VBOs.
    return buffer;
}

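// Ensures 'rt' has a stencil attachment if one can be attached. Stencil buffers are shared
// between render targets: the attachment is looked up (and registered) under a unique key
// computed from its width, height, and sample count, so multiple render targets of the same
// size can reuse a single stencil buffer.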
bool GrResourceProvider::attachStencilAttachment(GrRenderTarget* rt) {
    SkASSERT(rt);
    if (rt->renderTargetPriv().getStencilAttachment()) {
        return true;
    }

    if (!rt->wasDestroyed() && rt->canAttemptStencilAttachment()) {
        GrUniqueKey sbKey;

        int width = rt->width();
        int height = rt->height();
#if 0
        if (this->caps()->oversizedStencilSupport()) {
            width  = SkNextPow2(width);
            height = SkNextPow2(height);
        }
#endif
        GrStencilAttachment::ComputeSharedStencilAttachmentKey(width, height,
                                                               rt->numStencilSamples(), &sbKey);
        auto stencil = this->findByUniqueKey<GrStencilAttachment>(sbKey);
        if (!stencil) {
            // Need to try and create a new stencil
            stencil.reset(this->gpu()->createStencilAttachmentForRenderTarget(rt, width, height));
            if (!stencil) {
                return false;
            }
            this->assignUniqueKeyToResource(sbKey, stencil.get());
        }
        rt->renderTargetPriv().attachStencilAttachment(std::move(stencil));
    }
    return SkToBool(rt->renderTargetPriv().getStencilAttachment());
}

sk_sp<GrRenderTarget> GrResourceProvider::wrapBackendTextureAsRenderTarget(
        const GrBackendTexture& tex, int sampleCnt)
{
    if (this->isAbandoned()) {
        return nullptr;
    }
    return fGpu->wrapBackendTextureAsRenderTarget(tex, sampleCnt);
}

sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT GrResourceProvider::makeSemaphore(bool isOwned) {
    return fGpu->makeSemaphore(isOwned);
}

sk_sp<GrSemaphore> GrResourceProvider::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
                                                            SemaphoreWrapType wrapType,
                                                            GrWrapOwnership ownership) {
    ASSERT_SINGLE_OWNER
    return this->isAbandoned() ? nullptr : fGpu->wrapBackendSemaphore(semaphore,
                                                                      wrapType,
                                                                      ownership);
}
    460