/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrDrawingManager.h"

#include "GrBackendSemaphore.h"
#include "GrContext.h"
#include "GrContextPriv.h"
#include "GrGpu.h"
#include "GrOnFlushResourceProvider.h"
#include "GrOpList.h"
#include "GrRenderTargetContext.h"
#include "GrPathRenderingRenderTargetContext.h"
#include "GrRenderTargetProxy.h"
#include "GrResourceAllocator.h"
#include "GrResourceProvider.h"
#include "GrSoftwarePathRenderer.h"
#include "GrSurfaceProxyPriv.h"
#include "GrTextureContext.h"
#include "GrTextureOpList.h"
#include "GrTextureProxy.h"
#include "GrTextureProxyPriv.h"

#include "SkDeferredDisplayList.h"
#include "SkSurface_Gpu.h"
#include "SkTTopoSort.h"

#include "GrTracing.h"
#include "text/GrAtlasTextContext.h"
#include "text/GrStencilAndCoverTextContext.h"

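// Releases everything the drawing manager owns: closes all opLists (calling endFlush() on
// any that clients still hold references to), deletes the path-renderer chain and software
// path renderer, and drops the registered onFlush callback objects.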
void GrDrawingManager::cleanup() {
    for (int i = 0; i < fOpLists.count(); ++i) {
        // no opList should receive a new command after this
        fOpLists[i]->makeClosed(*fContext->caps());

        // We shouldn't need to do this, but it turns out some clients still hold onto opLists
        // after a cleanup.
        // MDB TODO: is this still true?
        if (!fOpLists[i]->unique()) {
            // TODO: Eventually this should be guaranteed unique.
            // https://bugs.chromium.org/p/skia/issues/detail?id=7111
            fOpLists[i]->endFlush();
        }
    }

    fOpLists.reset();

    delete fPathRendererChain;
    fPathRendererChain = nullptr;
    SkSafeSetNull(fSoftwarePathRenderer);

    fOnFlushCBObjects.reset();
}

GrDrawingManager::~GrDrawingManager() {
    this->cleanup();
}

void GrDrawingManager::abandon() {
    fAbandoned = true;
    this->cleanup();
}

void GrDrawingManager::freeGpuResources() {
    for (int i = fOnFlushCBObjects.count() - 1; i >= 0; --i) {
        if (!fOnFlushCBObjects[i]->retainOnFreeGpuResources()) {
            // it's safe to just do this because we're iterating in reverse
            fOnFlushCBObjects.removeShuffle(i);
        }
    }

    // a path renderer may be holding onto resources
    delete fPathRendererChain;
    fPathRendererChain = nullptr;
    SkSafeSetNull(fSoftwarePathRenderer);
}

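// Flushes all queued work to the GPU. The steps below are: close any still-open opLists,
// topologically sort them by dependency, let the onFlush callback objects (e.g. atlas
// managers) prepare their own opLists, assign GPU resources to the proxies via
// GrResourceAllocator, execute the opLists in resource-ready ranges, and finally call
// finishFlush() on the GrGpu, which submits the work and signals the requested semaphores.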
// MDB TODO: make use of the 'proxy' parameter.
GrSemaphoresSubmitted GrDrawingManager::internalFlush(GrSurfaceProxy*,
                                                      GrResourceCache::FlushType type,
                                                      int numSemaphores,
                                                      GrBackendSemaphore backendSemaphores[]) {
    GR_CREATE_TRACE_MARKER_CONTEXT("GrDrawingManager", "internalFlush", fContext);

    if (fFlushing || this->wasAbandoned()) {
        return GrSemaphoresSubmitted::kNo;
    }
    fFlushing = true;

    for (int i = 0; i < fOpLists.count(); ++i) {
        // Usually the GrOpLists are already closed at this point, but sometimes Ganesh needs
        // to flush mid-draw. In that case, the SkGpuDevice's GrOpLists won't be closed but
        // still need to be flushed anyway. Closing such GrOpLists here means new GrOpLists
        // will be created to replace them if the SkGpuDevice(s) write to them again.
        fOpLists[i]->makeClosed(*fContext->caps());
    }

#ifdef SK_DEBUG
    // This block checks for any unnecessary splits in the opLists. If two sequential opLists
    // share the same backing GrSurfaceProxy it means the opList was artificially split.
    if (fOpLists.count()) {
        GrRenderTargetOpList* prevOpList = fOpLists[0]->asRenderTargetOpList();
        for (int i = 1; i < fOpLists.count(); ++i) {
            GrRenderTargetOpList* curOpList = fOpLists[i]->asRenderTargetOpList();

            if (prevOpList && curOpList) {
                SkASSERT(prevOpList->fTarget.get() != curOpList->fTarget.get());
            }

            prevOpList = curOpList;
        }
    }
#endif

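    // Sort the opLists topologically so that each list executes after every list it depends
    // on (unless sorting has been compiled out).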
#ifndef SK_DISABLE_RENDER_TARGET_SORTING
    SkDEBUGCODE(bool result =) SkTTopoSort<GrOpList, GrOpList::TopoSortTraits>(&fOpLists);
    SkASSERT(result);
#endif

    GrGpu* gpu = fContext->contextPriv().getGpu();

    GrOpFlushState flushState(gpu, fContext->contextPriv().resourceProvider(),
                              &fTokenTracker);

    GrOnFlushResourceProvider onFlushProvider(this);
    // TODO: AFAICT the only reason fTokenTracker is on GrDrawingManager rather than on the
    // stack here is to preserve the flush tokens across flushes.

    // Prepare any onFlush op lists (e.g. atlases).
    if (!fOnFlushCBObjects.empty()) {
        fFlushingOpListIDs.reset(fOpLists.count());
        for (int i = 0; i < fOpLists.count(); ++i) {
            fFlushingOpListIDs[i] = fOpLists[i]->uniqueID();
        }
        SkSTArray<4, sk_sp<GrRenderTargetContext>> renderTargetContexts;
        for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
            onFlushCBObject->preFlush(&onFlushProvider,
                                      fFlushingOpListIDs.begin(), fFlushingOpListIDs.count(),
                                      &renderTargetContexts);
            for (const sk_sp<GrRenderTargetContext>& rtc : renderTargetContexts) {
                sk_sp<GrRenderTargetOpList> onFlushOpList = sk_ref_sp(rtc->getRTOpList());
                if (!onFlushOpList) {
                    continue;   // Odd - but not a big deal
                }
#ifdef SK_DEBUG
                // OnFlush callbacks are already invoked during flush, and are therefore expected to
                // handle resource allocation & usage on their own. (No deferred or lazy proxies!)
                onFlushOpList->visitProxies_debugOnly([](GrSurfaceProxy* p) {
                    SkASSERT(!p->asTextureProxy() || !p->asTextureProxy()->texPriv().isDeferred());
                    SkASSERT(GrSurfaceProxy::LazyState::kNot == p->lazyInstantiationState());
                });
#endif
                onFlushOpList->makeClosed(*fContext->caps());
                onFlushOpList->prepare(&flushState);
                fOnFlushCBOpLists.push_back(std::move(onFlushOpList));
            }
            renderTargetContexts.reset();
        }
    }

#if 0
    // Enable this to print out verbose GrOp information
    for (int i = 0; i < fOpLists.count(); ++i) {
        SkDEBUGCODE(fOpLists[i]->dump();)
    }
#endif

    int startIndex, stopIndex;
    bool flushed = false;

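    // Assign GPU resources to the proxies used by each opList. gatherProxyIntervals()
    // records the span of opLists that touches each proxy; assign() then hands back
    // consecutive ranges [startIndex, stopIndex) of opLists whose proxies have been
    // instantiated, so each range can be executed as soon as its resources are ready.
    // (When explicit allocation is compiled out, everything is executed as one range and
    // instantiation happens in executeOpLists().)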
    {
        GrResourceAllocator alloc(fContext->contextPriv().resourceProvider());
        for (int i = 0; i < fOpLists.count(); ++i) {
            fOpLists[i]->gatherProxyIntervals(&alloc);
            alloc.markEndOfOpList(i);
        }

#ifdef SK_DISABLE_EXPLICIT_GPU_RESOURCE_ALLOCATION
        startIndex = 0;
        stopIndex = fOpLists.count();
#else
        GrResourceAllocator::AssignError error = GrResourceAllocator::AssignError::kNoError;
        while (alloc.assign(&startIndex, &stopIndex, &error))
#endif
        {
#ifndef SK_DISABLE_EXPLICIT_GPU_RESOURCE_ALLOCATION
            if (GrResourceAllocator::AssignError::kFailedProxyInstantiation == error) {
                for (int i = startIndex; i < stopIndex; ++i) {
                    fOpLists[i]->purgeOpsWithUninstantiatedProxies();
                }
            }
#endif
            if (this->executeOpLists(startIndex, stopIndex, &flushState)) {
                flushed = true;
            }
        }
    }

    fOpLists.reset();

    GrSemaphoresSubmitted result = gpu->finishFlush(numSemaphores, backendSemaphores);

    // We always have to notify the cache when it requested a flush so it can reset its state.
    if (flushed || type == GrResourceCache::FlushType::kCacheRequested) {
        fContext->contextPriv().getResourceCache()->notifyFlushOccurred(type);
    }
    for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
        onFlushCBObject->postFlush(fTokenTracker.nextTokenToFlush(), fFlushingOpListIDs.begin(),
                                   fFlushingOpListIDs.count());
    }
    fFlushingOpListIDs.reset();
    fFlushing = false;

    return result;
}

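// Executes the opLists in [startIndex, stopIndex). Each list is first given a chance to
// prepare (record uploads and draw data into 'flushState'), then all pending uploads are
// issued, the onFlush opLists (e.g. atlases) are executed, and finally the normal opLists
// are executed in order. Returns true if any opList actually executed work.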
bool GrDrawingManager::executeOpLists(int startIndex, int stopIndex, GrOpFlushState* flushState) {
    SkASSERT(startIndex <= stopIndex && stopIndex <= fOpLists.count());

    bool anyOpListsExecuted = false;

    for (int i = startIndex; i < stopIndex; ++i) {
        if (!fOpLists[i]) {
            continue;
        }

#ifdef SK_DISABLE_EXPLICIT_GPU_RESOURCE_ALLOCATION
        if (!fOpLists[i]->instantiate(fContext->contextPriv().resourceProvider())) {
            SkDebugf("OpList failed to instantiate.\n");
            fOpLists[i] = nullptr;
            continue;
        }
#else
        SkASSERT(fOpLists[i]->isInstantiated());
#endif

        // TODO: handle this instantiation via lazy surface proxies?
        // Instantiate all deferred proxies (being built on worker threads) so we can upload them
        fOpLists[i]->instantiateDeferredProxies(fContext->contextPriv().resourceProvider());
        fOpLists[i]->prepare(flushState);
    }

    // Upload all data to the GPU
    flushState->preExecuteDraws();

    // Execute the onFlush op lists first, if any.
    for (sk_sp<GrOpList>& onFlushOpList : fOnFlushCBOpLists) {
        if (!onFlushOpList->execute(flushState)) {
            SkDebugf("WARNING: onFlushOpList failed to execute.\n");
        }
        SkASSERT(onFlushOpList->unique());
        onFlushOpList = nullptr;
    }
    fOnFlushCBOpLists.reset();

    // Execute the normal op lists.
    for (int i = startIndex; i < stopIndex; ++i) {
        if (!fOpLists[i]) {
            continue;
        }

        if (fOpLists[i]->execute(flushState)) {
            anyOpListsExecuted = true;
        }
    }

    SkASSERT(!flushState->commandBuffer());
    SkASSERT(fTokenTracker.nextDrawToken() == fTokenTracker.nextTokenToFlush());

    // We reset the flush state before the OpLists so that the last resources to be freed are those
    // that are written to in the OpLists. This helps to make sure the most recently used resources
    // are the last to be purged by the resource cache.
    flushState->reset();

    for (int i = startIndex; i < stopIndex; ++i) {
        if (!fOpLists[i]) {
            continue;
        }
        if (!fOpLists[i]->unique()) {
            // TODO: Eventually this should be guaranteed unique.
            // https://bugs.chromium.org/p/skia/issues/detail?id=7111
            fOpLists[i]->endFlush();
        }
        fOpLists[i] = nullptr;
    }

    return anyOpListsExecuted;
}

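// Flushes any pending work targeting 'proxy' (signaling the requested semaphores as part of
// that flush) and resolves its MSAA render target, if any, so the backing backend object is
// in a consistent state for use outside of Skia.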
GrSemaphoresSubmitted GrDrawingManager::prepareSurfaceForExternalIO(
        GrSurfaceProxy* proxy, int numSemaphores, GrBackendSemaphore backendSemaphores[]) {
    if (this->wasAbandoned()) {
        return GrSemaphoresSubmitted::kNo;
    }
    SkASSERT(proxy);

    GrSemaphoresSubmitted result = GrSemaphoresSubmitted::kNo;
    if (proxy->priv().hasPendingIO() || numSemaphores) {
        result = this->flush(proxy, numSemaphores, backendSemaphores);
    }

    if (!proxy->instantiate(fContext->contextPriv().resourceProvider())) {
        return result;
    }

    GrGpu* gpu = fContext->contextPriv().getGpu();
    GrSurface* surface = proxy->priv().peekSurface();

    if (gpu && surface->asRenderTarget()) {
        gpu->resolveRenderTarget(surface->asRenderTarget(), proxy->origin());
    }
    return result;
}

void GrDrawingManager::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) {
    fOnFlushCBObjects.push_back(onFlushCBObject);
}

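// Deferred display list (DDL) support: moveOpListsToDDL() transfers the recorded opLists
// into the SkDeferredDisplayList at the end of recording; copyOpListsFromDDL() adds them
// back to this drawing manager at replay time, redirected at the destination proxy.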
void GrDrawingManager::moveOpListsToDDL(SkDeferredDisplayList* ddl) {
#ifndef SK_RASTER_RECORDER_IMPLEMENTATION
    for (int i = 0; i < fOpLists.count(); ++i) {
        // no opList should receive a new command after this
        fOpLists[i]->makeClosed(*fContext->caps());
    }

    ddl->fOpLists = std::move(fOpLists);
#endif
}

void GrDrawingManager::copyOpListsFromDDL(const SkDeferredDisplayList* ddl,
                                          GrRenderTargetProxy* newDest) {
#ifndef SK_RASTER_RECORDER_IMPLEMENTATION
    // Here we jam the proxy that backs the current replay SkSurface into the LazyProxyData.
    // The lazy proxy that references it (in the copied opLists) will steal its GrTexture.
    ddl->fLazyProxyData->fReplayDest = newDest;
    fOpLists.push_back_n(ddl->fOpLists.count(), ddl->fOpLists.begin());
#endif
}

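// Creates a new opList targeting 'rtp'. When 'managedOpList' is false the new list is not
// added to fOpLists, so the caller (rather than the drawing manager) controls its lifetime.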
sk_sp<GrRenderTargetOpList> GrDrawingManager::newRTOpList(GrRenderTargetProxy* rtp,
                                                          bool managedOpList) {
    SkASSERT(fContext);

    // This is a temporary fix for the partial-MDB world. In that world we're not reordering,
    // so ops that (in the single-opList world) would've just glommed onto the end of the
    // single opList, but refer to a far earlier RT, need to appear in their own opList.
    if (!fOpLists.empty()) {
        fOpLists.back()->makeClosed(*fContext->caps());
    }

    auto resourceProvider = fContext->contextPriv().resourceProvider();

    sk_sp<GrRenderTargetOpList> opList(new GrRenderTargetOpList(rtp,
                                                                resourceProvider,
                                                                fContext->getAuditTrail()));
    SkASSERT(rtp->getLastOpList() == opList.get());

    if (managedOpList) {
        fOpLists.push_back() = opList;
    }

    return opList;
}

sk_sp<GrTextureOpList> GrDrawingManager::newTextureOpList(GrTextureProxy* textureProxy) {
    SkASSERT(fContext);

    // This is a temporary fix for the partial-MDB world. In that world we're not reordering,
    // so ops that (in the single-opList world) would've just glommed onto the end of the
    // single opList, but refer to a far earlier RT, need to appear in their own opList.
    if (!fOpLists.empty()) {
        fOpLists.back()->makeClosed(*fContext->caps());
    }

    sk_sp<GrTextureOpList> opList(new GrTextureOpList(fContext->contextPriv().resourceProvider(),
                                                      textureProxy,
                                                      fContext->getAuditTrail()));

    SkASSERT(textureProxy->getLastOpList() == opList.get());

    fOpLists.push_back() = opList;

    return opList;
}

GrAtlasTextContext* GrDrawingManager::getAtlasTextContext() {
    if (!fAtlasTextContext) {
        fAtlasTextContext = GrAtlasTextContext::Make(fOptionsForAtlasTextContext);
    }

    return fAtlasTextContext.get();
}

/*
 * This method finds a path renderer that can draw the specified path on
 * the provided target.
 * Due to its expense, the software path renderer has been split out so it
 * can be individually allowed/disallowed via the "allowSW" boolean.
 */
GrPathRenderer* GrDrawingManager::getPathRenderer(const GrPathRenderer::CanDrawPathArgs& args,
                                                  bool allowSW,
                                                  GrPathRendererChain::DrawType drawType,
                                                  GrPathRenderer::StencilSupport* stencilSupport) {

    if (!fPathRendererChain) {
        fPathRendererChain = new GrPathRendererChain(fContext, fOptionsForPathRendererChain);
    }

    GrPathRenderer* pr = fPathRendererChain->getPathRenderer(args, drawType, stencilSupport);
    if (!pr && allowSW) {
        if (!fSoftwarePathRenderer) {
            fSoftwarePathRenderer =
                    new GrSoftwarePathRenderer(fContext->contextPriv().proxyProvider(),
                                               fOptionsForPathRendererChain.fAllowPathMaskCaching);
        }
        if (GrPathRenderer::CanDrawPath::kNo != fSoftwarePathRenderer->canDrawPath(args)) {
            pr = fSoftwarePathRenderer;
        }
    }

    return pr;
}

GrCoverageCountingPathRenderer* GrDrawingManager::getCoverageCountingPathRenderer() {
    if (!fPathRendererChain) {
        fPathRendererChain = new GrPathRendererChain(fContext, fOptionsForPathRendererChain);
    }
    return fPathRendererChain->getCoverageCountingPathRenderer();
}

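// Wraps a render-target-capable proxy in a GrRenderTargetContext for drawing. When device-
// independent fonts are requested, the shader caps report path rendering support, and the
// target is multisampled, a GrPathRenderingRenderTargetContext is returned instead.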
sk_sp<GrRenderTargetContext> GrDrawingManager::makeRenderTargetContext(
                                                            sk_sp<GrSurfaceProxy> sProxy,
                                                            sk_sp<SkColorSpace> colorSpace,
                                                            const SkSurfaceProps* surfaceProps,
                                                            bool managedOpList) {
    if (this->wasAbandoned() || !sProxy->asRenderTargetProxy()) {
        return nullptr;
    }

    // SkSurface catches bad color space usage at creation. This check handles anything that slips
    // by, including internal usage. We allow a null color space here, for read/write pixels and
    // other special code paths. If a color space is provided, though, enforce all other rules.
    if (colorSpace && !SkSurface_Gpu::Valid(fContext, sProxy->config(), colorSpace.get())) {
        SkDEBUGFAIL("Invalid config and colorspace combination");
        return nullptr;
    }

    sk_sp<GrRenderTargetProxy> rtp(sk_ref_sp(sProxy->asRenderTargetProxy()));

    bool useDIF = false;
    if (surfaceProps) {
        useDIF = surfaceProps->isUseDeviceIndependentFonts();
    }

    if (useDIF && fContext->caps()->shaderCaps()->pathRenderingSupport() &&
        GrFSAAType::kNone != rtp->fsaaType()) {

        return sk_sp<GrRenderTargetContext>(new GrPathRenderingRenderTargetContext(
                                                    fContext, this, std::move(rtp),
                                                    std::move(colorSpace), surfaceProps,
                                                    fContext->getAuditTrail(), fSingleOwner));
    }

    return sk_sp<GrRenderTargetContext>(new GrRenderTargetContext(fContext, this, std::move(rtp),
                                                                  std::move(colorSpace),
                                                                  surfaceProps,
                                                                  fContext->getAuditTrail(),
                                                                  fSingleOwner, managedOpList));
}

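// Wraps a pure texture proxy (one with no render target) in a GrTextureContext, which
// provides non-draw access to the texture, e.g. for copies and pixel reads/writes.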
sk_sp<GrTextureContext> GrDrawingManager::makeTextureContext(sk_sp<GrSurfaceProxy> sProxy,
                                                             sk_sp<SkColorSpace> colorSpace) {
    if (this->wasAbandoned() || !sProxy->asTextureProxy()) {
        return nullptr;
    }

    // SkSurface catches bad color space usage at creation. This check handles anything that slips
    // by, including internal usage. We allow a null color space here, for read/write pixels and
    // other special code paths. If a color space is provided, though, enforce all other rules.
    if (colorSpace && !SkSurface_Gpu::Valid(fContext, sProxy->config(), colorSpace.get())) {
        SkDEBUGFAIL("Invalid config and colorspace combination");
        return nullptr;
    }

    // GrTextureRenderTargets should always be using GrRenderTargetContext
    SkASSERT(!sProxy->asRenderTargetProxy());

    sk_sp<GrTextureProxy> textureProxy(sk_ref_sp(sProxy->asTextureProxy()));

    return sk_sp<GrTextureContext>(new GrTextureContext(fContext, this, std::move(textureProxy),
                                                        std::move(colorSpace),
                                                        fContext->getAuditTrail(),
                                                        fSingleOwner));
}
    501