/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrCoverageCountingPathRenderer.h"

#include "GrCaps.h"
#include "GrClip.h"
#include "GrGpu.h"
#include "GrGpuCommandBuffer.h"
#include "GrOpFlushState.h"
#include "GrProxyProvider.h"
#include "GrRenderTargetOpList.h"
#include "GrStyle.h"
#include "GrTexture.h"
#include "SkMakeUnique.h"
#include "SkMatrix.h"
#include "SkPathOps.h"
#include "ccpr/GrCCClipProcessor.h"

// Shorthand for keeping line lengths under control with nested classes...
using CCPR = GrCoverageCountingPathRenderer;

// If a path spans more pixels than this, we need to crop it or else analytic AA can run out of fp32
// precision.
static constexpr float kPathCropThreshold = 1 << 16;
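// (fp32 carries a 24-bit significand, so at coordinates near 2^16 only a handful of fractional
// bits remain for sub-pixel positions.)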

static void crop_path(const SkPath& path, const SkIRect& cropbox, SkPath* out) {
    SkPath cropPath;
    cropPath.addRect(SkRect::Make(cropbox));
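    // Op() is the boolean path-ops entry point from SkPathOps.h; kIntersect_SkPathOp clips
    // 'path' to the crop box.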
    if (!Op(cropPath, path, kIntersect_SkPathOp, out)) {
        // This can fail if the PathOps encounter NaN or infinities.
        out->reset();
    }
}

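// CCPR requires shader integer and flat-interpolation support, instanced attribs, mappable GPU
// buffers, and a texturable, renderable half-float alpha config (used for the coverage-count
// atlas).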
bool GrCoverageCountingPathRenderer::IsSupported(const GrCaps& caps) {
    const GrShaderCaps& shaderCaps = *caps.shaderCaps();
    return shaderCaps.integerSupport() && shaderCaps.flatInterpolationSupport() &&
           caps.instanceAttribSupport() && GrCaps::kNone_MapFlags != caps.mapBufferFlags() &&
           caps.isConfigTexturable(kAlpha_half_GrPixelConfig) &&
           caps.isConfigRenderable(kAlpha_half_GrPixelConfig) &&
           !caps.blacklistCoverageCounting();
}

sk_sp<GrCoverageCountingPathRenderer> GrCoverageCountingPathRenderer::CreateIfSupported(
        const GrCaps& caps, bool drawCachablePaths) {
    auto ccpr = IsSupported(caps) ? new GrCoverageCountingPathRenderer(drawCachablePaths) : nullptr;
    return sk_sp<GrCoverageCountingPathRenderer>(ccpr);
}

GrPathRenderer::CanDrawPath GrCoverageCountingPathRenderer::onCanDrawPath(
        const CanDrawPathArgs& args) const {
    if (args.fShape->hasUnstyledKey() && !fDrawCachablePaths) {
        return CanDrawPath::kNo;
    }

    if (!args.fShape->style().isSimpleFill() || args.fShape->inverseFilled() ||
        args.fViewMatrix->hasPerspective() || GrAAType::kCoverage != args.fAAType) {
        return CanDrawPath::kNo;
    }

    SkPath path;
    args.fShape->asPath(&path);
    if (SkPathPriv::ConicWeightCnt(path)) {
        return CanDrawPath::kNo;
    }

    SkRect devBounds;
    SkIRect devIBounds;
    args.fViewMatrix->mapRect(&devBounds, path.getBounds());
    devBounds.roundOut(&devIBounds);
    if (!devIBounds.intersect(*args.fClipConservativeBounds)) {
        // Path is completely clipped away. Our code will eventually notice this before doing any
        // real work.
        return CanDrawPath::kYes;
    }

    if (devIBounds.height() * devIBounds.width() > 256 * 256) {
        // Large paths can blow up the atlas fast. And they are not ideal for a two-pass rendering
        // algorithm. Give the simpler direct renderers a chance before we commit to drawing it.
        return CanDrawPath::kAsBackup;
    }

    if (args.fShape->hasUnstyledKey() && path.countVerbs() > 50) {
        // Complex paths do better cached in an SDF, if the renderer will accept them.
        return CanDrawPath::kAsBackup;
    }

    return CanDrawPath::kYes;
}

bool GrCoverageCountingPathRenderer::onDrawPath(const DrawPathArgs& args) {
    SkASSERT(!fFlushing);
    auto op = skstd::make_unique<DrawPathsOp>(this, args, args.fPaint.getColor());
    args.fRenderTargetContext->addDrawOp(*args.fClip, std::move(op));
    return true;
}

CCPR::DrawPathsOp::DrawPathsOp(GrCoverageCountingPathRenderer* ccpr, const DrawPathArgs& args,
                               GrColor color)
        : INHERITED(ClassID())
        , fCCPR(ccpr)
        , fSRGBFlags(GrPipeline::SRGBFlagsFromPaint(args.fPaint))
        , fProcessors(std::move(args.fPaint))
        , fTailDraw(&fHeadDraw)
        , fOwningRTPendingPaths(nullptr) {
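    // The op starts out with a single draw, fHeadDraw, stored inline. If other DrawPathsOps get
    // merged in via onCombineIfPossible(), their draws are appended to this list and fTailDraw
    // advances.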
    SkDEBUGCODE(++fCCPR->fPendingDrawOpsCount);
    SkDEBUGCODE(fBaseInstance = -1);
    SkDEBUGCODE(fInstanceCount = 1);
    SkDEBUGCODE(fNumSkippedInstances = 0);
    GrRenderTargetContext* const rtc = args.fRenderTargetContext;

    SkRect devBounds;
    args.fViewMatrix->mapRect(&devBounds, args.fShape->bounds());
    args.fClip->getConservativeBounds(rtc->width(), rtc->height(), &fHeadDraw.fClipIBounds,
                                      nullptr);
    if (SkTMax(devBounds.height(), devBounds.width()) > kPathCropThreshold) {
        // The path is too large. We need to crop it or analytic AA can run out of fp32 precision.
        SkPath path;
        args.fShape->asPath(&path);
        path.transform(*args.fViewMatrix);
        fHeadDraw.fMatrix.setIdentity();
        crop_path(path, fHeadDraw.fClipIBounds, &fHeadDraw.fPath);
        devBounds = fHeadDraw.fPath.getBounds();
    } else {
        fHeadDraw.fMatrix = *args.fViewMatrix;
        args.fShape->asPath(&fHeadDraw.fPath);
    }
    fHeadDraw.fColor = color;  // Can't call args.fPaint.getColor() because it has been std::move'd.

    // FIXME: intersect with clip bounds to (hopefully) improve batching.
    // (This is nontrivial due to assumptions in generating the octagon cover geometry.)
    this->setBounds(devBounds, GrOp::HasAABloat::kYes, GrOp::IsZeroArea::kNo);
}

CCPR::DrawPathsOp::~DrawPathsOp() {
    if (fOwningRTPendingPaths) {
        // Remove CCPR's dangling pointer to this Op before deleting it.
        fOwningRTPendingPaths->fDrawOps.remove(this);
    }
    SkDEBUGCODE(--fCCPR->fPendingDrawOpsCount);
}

GrDrawOp::RequiresDstTexture CCPR::DrawPathsOp::finalize(const GrCaps& caps,
                                                         const GrAppliedClip* clip,
                                                         GrPixelConfigIsClamped dstIsClamped) {
    SkASSERT(!fCCPR->fFlushing);
    // There should only be a single path draw in this Op right now.
    SkASSERT(1 == fInstanceCount);
    SkASSERT(&fHeadDraw == fTailDraw);
    GrProcessorSet::Analysis analysis =
            fProcessors.finalize(fHeadDraw.fColor, GrProcessorAnalysisCoverage::kSingleChannel,
                                 clip, false, caps, dstIsClamped, &fHeadDraw.fColor);
    return analysis.requiresDstTexture() ? RequiresDstTexture::kYes : RequiresDstTexture::kNo;
}

bool CCPR::DrawPathsOp::onCombineIfPossible(GrOp* op, const GrCaps& caps) {
    DrawPathsOp* that = op->cast<DrawPathsOp>();
    SkASSERT(fCCPR == that->fCCPR);
    SkASSERT(!fCCPR->fFlushing);
    SkASSERT(fOwningRTPendingPaths);
    SkASSERT(fInstanceCount);
    SkASSERT(!that->fOwningRTPendingPaths || that->fOwningRTPendingPaths == fOwningRTPendingPaths);
    SkASSERT(that->fInstanceCount);

    if (this->getFillType() != that->getFillType() || fSRGBFlags != that->fSRGBFlags ||
        fProcessors != that->fProcessors) {
        return false;
    }

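    // Steal that op's draws: copy its inline head draw into the shared allocator, then splice it
    // (and the rest of its chain, if any) onto the end of our list.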
    fTailDraw->fNext = &fOwningRTPendingPaths->fDrawsAllocator.push_back(that->fHeadDraw);
    fTailDraw = (that->fTailDraw == &that->fHeadDraw) ? fTailDraw->fNext : that->fTailDraw;

    this->joinBounds(*that);

    SkDEBUGCODE(fInstanceCount += that->fInstanceCount);
    SkDEBUGCODE(that->fInstanceCount = 0);
    return true;
}

void CCPR::DrawPathsOp::wasRecorded(GrRenderTargetOpList* opList) {
    SkASSERT(!fCCPR->fFlushing);
    SkASSERT(!fOwningRTPendingPaths);
    fOwningRTPendingPaths = &fCCPR->fRTPendingPathsMap[opList->uniqueID()];
    fOwningRTPendingPaths->fDrawOps.addToTail(this);
}

bool GrCoverageCountingPathRenderer::canMakeClipProcessor(const SkPath& deviceSpacePath) const {
    if (!fDrawCachablePaths && !deviceSpacePath.isVolatile()) {
        return false;
    }

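    // Reject paths with conic segments, mirroring the check in onCanDrawPath.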
    if (SkPathPriv::ConicWeightCnt(deviceSpacePath)) {
        return false;
    }

    return true;
}

std::unique_ptr<GrFragmentProcessor> GrCoverageCountingPathRenderer::makeClipProcessor(
        GrProxyProvider* proxyProvider,
        uint32_t opListID, const SkPath& deviceSpacePath, const SkIRect& accessRect,
        int rtWidth, int rtHeight) {
    using MustCheckBounds = GrCCClipProcessor::MustCheckBounds;

    SkASSERT(!fFlushing);
    SkASSERT(this->canMakeClipProcessor(deviceSpacePath));

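    // Clip paths are keyed by SkPath generation ID within each opList, so a clip path that gets
    // applied to many draws shares a single atlas entry.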
    ClipPath& clipPath = fRTPendingPathsMap[opListID].fClipPaths[deviceSpacePath.getGenerationID()];
    if (clipPath.isUninitialized()) {
        // This ClipPath was just created during lookup. Initialize it.
        clipPath.init(proxyProvider, deviceSpacePath, accessRect, rtWidth, rtHeight);
    } else {
        clipPath.addAccess(accessRect);
    }

    bool mustCheckBounds = !clipPath.pathDevIBounds().contains(accessRect);
    return skstd::make_unique<GrCCClipProcessor>(&clipPath, MustCheckBounds(mustCheckBounds),
                                                 deviceSpacePath.getFillType());
}

void CCPR::ClipPath::init(GrProxyProvider* proxyProvider,
                          const SkPath& deviceSpacePath, const SkIRect& accessRect,
                          int rtWidth, int rtHeight) {
    SkASSERT(this->isUninitialized());

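    // The atlas texture won't exist until flush time, so hand the clip processor a fully lazy
    // proxy now. The instantiation callback below runs once the atlas has actually been
    // allocated and rendered.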
    fAtlasLazyProxy = proxyProvider->createFullyLazyProxy(
            [this](GrResourceProvider* resourceProvider) {
                if (!resourceProvider) {
                    return sk_sp<GrTexture>();
                }
                SkASSERT(fHasAtlas);
                SkASSERT(!fHasAtlasTransform);

                GrTextureProxy* textureProxy = fAtlas ? fAtlas->textureProxy() : nullptr;
                if (!textureProxy || !textureProxy->instantiate(resourceProvider)) {
                    fAtlasScale = fAtlasTranslate = {0, 0};
                    SkDEBUGCODE(fHasAtlasTransform = true);
                    return sk_sp<GrTexture>();
                }

                SkASSERT(kTopLeft_GrSurfaceOrigin == textureProxy->origin());

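                // Fill out the device-space -> atlas-UV transform, i.e.
                // atlasCoord = devCoord * fAtlasScale + fAtlasTranslate.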
                fAtlasScale = {1.f / textureProxy->width(), 1.f / textureProxy->height()};
                fAtlasTranslate = {fAtlasOffsetX * fAtlasScale.x(),
                                   fAtlasOffsetY * fAtlasScale.y()};
                SkDEBUGCODE(fHasAtlasTransform = true);

                return sk_ref_sp(textureProxy->priv().peekTexture());
            },
            GrProxyProvider::Renderable::kYes, kTopLeft_GrSurfaceOrigin, kAlpha_half_GrPixelConfig);

    const SkRect& pathDevBounds = deviceSpacePath.getBounds();
    if (SkTMax(pathDevBounds.height(), pathDevBounds.width()) > kPathCropThreshold) {
        // The path is too large. We need to crop it or analytic AA can run out of fp32 precision.
        crop_path(deviceSpacePath, SkIRect::MakeWH(rtWidth, rtHeight), &fDeviceSpacePath);
    } else {
        fDeviceSpacePath = deviceSpacePath;
    }
    deviceSpacePath.getBounds().roundOut(&fPathDevIBounds);
    fAccessRect = accessRect;
}

void GrCoverageCountingPathRenderer::preFlush(GrOnFlushResourceProvider* onFlushRP,
                                              const uint32_t* opListIDs, int numOpListIDs,
                                              SkTArray<sk_sp<GrRenderTargetContext>>* results) {
    using PathInstance = GrCCPathProcessor::Instance;

    SkASSERT(!fFlushing);
    SkASSERT(!fPerFlushIndexBuffer);
    SkASSERT(!fPerFlushVertexBuffer);
    SkASSERT(!fPerFlushInstanceBuffer);
    SkASSERT(!fPerFlushPathParser);
    SkASSERT(fPerFlushAtlases.empty());
    SkDEBUGCODE(fFlushing = true);

    if (fRTPendingPathsMap.empty()) {
        return;  // Nothing to draw.
    }

    fPerFlushResourcesAreValid = false;

    // Count the paths that are being flushed.
    int maxTotalPaths = 0, maxPathPoints = 0, numSkPoints = 0, numSkVerbs = 0;
    SkDEBUGCODE(int numClipPaths = 0);
    for (int i = 0; i < numOpListIDs; ++i) {
        auto it = fRTPendingPathsMap.find(opListIDs[i]);
        if (fRTPendingPathsMap.end() == it) {
            continue;
        }
        const RTPendingPaths& rtPendingPaths = it->second;

        SkTInternalLList<DrawPathsOp>::Iter drawOpsIter;
        drawOpsIter.init(rtPendingPaths.fDrawOps,
                         SkTInternalLList<DrawPathsOp>::Iter::kHead_IterStart);
        while (DrawPathsOp* op = drawOpsIter.get()) {
            for (const DrawPathsOp::SingleDraw* draw = op->head(); draw; draw = draw->fNext) {
                ++maxTotalPaths;
                maxPathPoints = SkTMax(draw->fPath.countPoints(), maxPathPoints);
                numSkPoints += draw->fPath.countPoints();
                numSkVerbs += draw->fPath.countVerbs();
            }
            drawOpsIter.next();
        }

        maxTotalPaths += rtPendingPaths.fClipPaths.size();
        SkDEBUGCODE(numClipPaths += rtPendingPaths.fClipPaths.size());
        for (const auto& clipsIter : rtPendingPaths.fClipPaths) {
            const SkPath& path = clipsIter.second.deviceSpacePath();
            maxPathPoints = SkTMax(path.countPoints(), maxPathPoints);
            numSkPoints += path.countPoints();
            numSkVerbs += path.countVerbs();
        }
    }

    if (!maxTotalPaths) {
        return;  // Nothing to draw.
    }

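    // Note that these counts are upper bounds: draws whose paths turn out to be fully clipped
    // away get skipped during setupResources(), so not every reserved instance slot is used.
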
    // Allocate GPU buffers.
    fPerFlushIndexBuffer = GrCCPathProcessor::FindIndexBuffer(onFlushRP);
    if (!fPerFlushIndexBuffer) {
        SkDebugf("WARNING: failed to allocate ccpr path index buffer.\n");
        return;
    }

    fPerFlushVertexBuffer = GrCCPathProcessor::FindVertexBuffer(onFlushRP);
    if (!fPerFlushVertexBuffer) {
        SkDebugf("WARNING: failed to allocate ccpr path vertex buffer.\n");
        return;
    }

    fPerFlushInstanceBuffer =
            onFlushRP->makeBuffer(kVertex_GrBufferType, maxTotalPaths * sizeof(PathInstance));
    if (!fPerFlushInstanceBuffer) {
        SkDebugf("WARNING: failed to allocate path instance buffer. No paths will be drawn.\n");
        return;
    }

    PathInstance* pathInstanceData = static_cast<PathInstance*>(fPerFlushInstanceBuffer->map());
    SkASSERT(pathInstanceData);
    int pathInstanceIdx = 0;

    fPerFlushPathParser = sk_make_sp<GrCCPathParser>(maxTotalPaths, maxPathPoints, numSkPoints,
                                                     numSkVerbs);
    SkDEBUGCODE(int skippedTotalPaths = 0);

    // Allocate atlas(es) and fill out GPU instance buffers.
    for (int i = 0; i < numOpListIDs; ++i) {
        auto it = fRTPendingPathsMap.find(opListIDs[i]);
        if (fRTPendingPathsMap.end() == it) {
            continue;
        }
        RTPendingPaths& rtPendingPaths = it->second;

        SkTInternalLList<DrawPathsOp>::Iter drawOpsIter;
        drawOpsIter.init(rtPendingPaths.fDrawOps,
                         SkTInternalLList<DrawPathsOp>::Iter::kHead_IterStart);
        while (DrawPathsOp* op = drawOpsIter.get()) {
            pathInstanceIdx = op->setupResources(onFlushRP, pathInstanceData, pathInstanceIdx);
            drawOpsIter.next();
            SkDEBUGCODE(skippedTotalPaths += op->numSkippedInstances_debugOnly());
        }

        for (auto& clipsIter : rtPendingPaths.fClipPaths) {
            clipsIter.second.placePathInAtlas(this, onFlushRP, fPerFlushPathParser.get());
        }
    }

    fPerFlushInstanceBuffer->unmap();

    SkASSERT(pathInstanceIdx == maxTotalPaths - skippedTotalPaths - numClipPaths);

    if (!fPerFlushAtlases.empty()) {
        auto coverageCountBatchID = fPerFlushPathParser->closeCurrentBatch();
        fPerFlushAtlases.back().setCoverageCountBatchID(coverageCountBatchID);
    }

    if (!fPerFlushPathParser->finalize(onFlushRP)) {
        SkDebugf("WARNING: failed to allocate GPU buffers for CCPR. No paths will be drawn.\n");
        return;
    }

    // Draw the atlas(es).
    GrTAllocator<GrCCAtlas>::Iter atlasIter(&fPerFlushAtlases);
    while (atlasIter.next()) {
        if (auto rtc = atlasIter.get()->finalize(onFlushRP, fPerFlushPathParser)) {
            results->push_back(std::move(rtc));
        }
    }

    fPerFlushResourcesAreValid = true;
}

int CCPR::DrawPathsOp::setupResources(GrOnFlushResourceProvider* onFlushRP,
                                      GrCCPathProcessor::Instance* pathInstanceData,
                                      int pathInstanceIdx) {
    GrCCPathParser* parser = fCCPR->fPerFlushPathParser.get();
    const GrCCAtlas* currentAtlas = nullptr;
    SkASSERT(fInstanceCount > 0);
    SkASSERT(-1 == fBaseInstance);
    fBaseInstance = pathInstanceIdx;

    for (const SingleDraw* draw = this->head(); draw; draw = draw->fNext) {
        // parsePath gives us two tight bounding boxes: one in device space, as well as a second
        // one rotated an additional 45 degrees. The path vertex shader uses these two bounding
        // boxes to generate an octagon that circumscribes the path.
        SkRect devBounds, devBounds45;
        parser->parsePath(draw->fMatrix, draw->fPath, &devBounds, &devBounds45);

        SkIRect devIBounds;
        devBounds.roundOut(&devIBounds);

        int16_t offsetX, offsetY;
        GrCCAtlas* atlas = fCCPR->placeParsedPathInAtlas(onFlushRP, draw->fClipIBounds, devIBounds,
                                                         &offsetX, &offsetY);
        if (!atlas) {
            SkDEBUGCODE(++fNumSkippedInstances);
            continue;
        }
        if (currentAtlas != atlas) {
            if (currentAtlas) {
                this->addAtlasBatch(currentAtlas, pathInstanceIdx);
            }
            currentAtlas = atlas;
        }

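        // Emit this path's instance data. The initializer order below must match the field
        // layout of GrCCPathProcessor::Instance.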
        const SkMatrix& m = draw->fMatrix;
        pathInstanceData[pathInstanceIdx++] = {
                devBounds,
                devBounds45,
                {{m.getScaleX(), m.getSkewY(), m.getSkewX(), m.getScaleY()}},
                {{m.getTranslateX(), m.getTranslateY()}},
                {{offsetX, offsetY}},
                draw->fColor};
    }

    SkASSERT(pathInstanceIdx == fBaseInstance + fInstanceCount - fNumSkippedInstances);
    if (currentAtlas) {
        this->addAtlasBatch(currentAtlas, pathInstanceIdx);
    }

    return pathInstanceIdx;
}

void CCPR::ClipPath::placePathInAtlas(GrCoverageCountingPathRenderer* ccpr,
                                      GrOnFlushResourceProvider* onFlushRP,
                                      GrCCPathParser* parser) {
    SkASSERT(!this->isUninitialized());
    SkASSERT(!fHasAtlas);
    parser->parseDeviceSpacePath(fDeviceSpacePath);
    fAtlas = ccpr->placeParsedPathInAtlas(onFlushRP, fAccessRect, fPathDevIBounds, &fAtlasOffsetX,
                                          &fAtlasOffsetY);
    SkDEBUGCODE(fHasAtlas = true);
}

GrCCAtlas* GrCoverageCountingPathRenderer::placeParsedPathInAtlas(
        GrOnFlushResourceProvider* onFlushRP,
        const SkIRect& clipIBounds,
        const SkIRect& pathIBounds,
        int16_t* atlasOffsetX,
        int16_t* atlasOffsetY) {
    using ScissorMode = GrCCPathParser::ScissorMode;

    ScissorMode scissorMode;
    SkIRect clippedPathIBounds;
    if (clipIBounds.contains(pathIBounds)) {
        clippedPathIBounds = pathIBounds;
        scissorMode = ScissorMode::kNonScissored;
    } else if (clippedPathIBounds.intersect(clipIBounds, pathIBounds)) {
        scissorMode = ScissorMode::kScissored;
    } else {
        fPerFlushPathParser->discardParsedPath();
        return nullptr;
    }

    SkIPoint16 atlasLocation;
    int h = clippedPathIBounds.height(), w = clippedPathIBounds.width();
    if (fPerFlushAtlases.empty() || !fPerFlushAtlases.back().addRect(w, h, &atlasLocation)) {
        if (!fPerFlushAtlases.empty()) {
            // The atlas is out of room and can't grow any bigger.
            auto coverageCountBatchID = fPerFlushPathParser->closeCurrentBatch();
            fPerFlushAtlases.back().setCoverageCountBatchID(coverageCountBatchID);
        }
        fPerFlushAtlases.emplace_back(*onFlushRP->caps(), SkTMax(w, h));
        SkAssertResult(fPerFlushAtlases.back().addRect(w, h, &atlasLocation));
    }

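    // The returned offsets translate device space into atlas space:
    // atlasCoord = devCoord + (atlasOffsetX, atlasOffsetY).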
    *atlasOffsetX = atlasLocation.x() - static_cast<int16_t>(clippedPathIBounds.left());
    *atlasOffsetY = atlasLocation.y() - static_cast<int16_t>(clippedPathIBounds.top());
    fPerFlushPathParser->saveParsedPath(scissorMode, clippedPathIBounds, *atlasOffsetX,
                                        *atlasOffsetY);

    return &fPerFlushAtlases.back();
}

void CCPR::DrawPathsOp::onExecute(GrOpFlushState* flushState) {
    SkASSERT(fCCPR->fFlushing);
    SkASSERT(flushState->rtCommandBuffer());

    if (!fCCPR->fPerFlushResourcesAreValid) {
        return;  // Setup failed.
    }

    SkASSERT(fBaseInstance >= 0);  // Make sure setupResources has been called.

    GrPipeline::InitArgs initArgs;
    initArgs.fFlags = fSRGBFlags;
    initArgs.fProxy = flushState->drawOpArgs().fProxy;
    initArgs.fCaps = &flushState->caps();
    initArgs.fResourceProvider = flushState->resourceProvider();
    initArgs.fDstProxy = flushState->drawOpArgs().fDstProxy;
    GrPipeline pipeline(initArgs, std::move(fProcessors), flushState->detachAppliedClip());

    int baseInstance = fBaseInstance;

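    // Each AtlasBatch covers a contiguous range of path instances; issue one indexed, instanced
    // draw call per atlas texture.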
    for (int i = 0; i < fAtlasBatches.count(); baseInstance = fAtlasBatches[i++].fEndInstanceIdx) {
        const AtlasBatch& batch = fAtlasBatches[i];
        SkASSERT(batch.fEndInstanceIdx > baseInstance);

        if (!batch.fAtlas->textureProxy()) {
            continue;  // Atlas failed to allocate.
        }

        GrCCPathProcessor pathProc(flushState->resourceProvider(),
                                   sk_ref_sp(batch.fAtlas->textureProxy()), this->getFillType());

        GrMesh mesh(GrCCPathProcessor::MeshPrimitiveType(flushState->caps()));
        mesh.setIndexedInstanced(fCCPR->fPerFlushIndexBuffer.get(),
                                 GrCCPathProcessor::NumIndicesPerInstance(flushState->caps()),
                                 fCCPR->fPerFlushInstanceBuffer.get(),
                                 batch.fEndInstanceIdx - baseInstance, baseInstance);
        mesh.setVertexData(fCCPR->fPerFlushVertexBuffer.get());

        flushState->rtCommandBuffer()->draw(pipeline, pathProc, &mesh, nullptr, 1, this->bounds());
    }

    SkASSERT(baseInstance == fBaseInstance + fInstanceCount - fNumSkippedInstances);
}

void GrCoverageCountingPathRenderer::postFlush(GrDeferredUploadToken, const uint32_t* opListIDs,
                                               int numOpListIDs) {
    SkASSERT(fFlushing);
    fPerFlushAtlases.reset();
    fPerFlushPathParser.reset();
    fPerFlushInstanceBuffer.reset();
    fPerFlushVertexBuffer.reset();
    fPerFlushIndexBuffer.reset();
    // We wait to erase these until after flush, once Ops and FPs are done accessing their data.
    for (int i = 0; i < numOpListIDs; ++i) {
        fRTPendingPathsMap.erase(opListIDs[i]);
    }
    SkDEBUGCODE(fFlushing = false);
}
    559