/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrCoverageCountingPathRenderer.h"

#include "GrCaps.h"
#include "GrClip.h"
#include "GrGpu.h"
#include "GrGpuCommandBuffer.h"
#include "GrOpFlushState.h"
#include "GrProxyProvider.h"
#include "GrRenderTargetOpList.h"
#include "GrStyle.h"
#include "GrTexture.h"
#include "SkMakeUnique.h"
#include "SkMatrix.h"
#include "SkPathOps.h"
#include "SkPathPriv.h"
#include "ccpr/GrCCClipProcessor.h"

// Shorthand for keeping line lengths under control with nested classes...
using CCPR = GrCoverageCountingPathRenderer;

// If a path spans more pixels than this, we need to crop it or else analytic AA can run out of fp32
// precision.
static constexpr float kPathCropThreshold = 1 << 16;

static void crop_path(const SkPath& path, const SkIRect& cropbox, SkPath* out) {
    SkPath cropPath;
    cropPath.addRect(SkRect::Make(cropbox));
    if (!Op(cropPath, path, kIntersect_SkPathOp, out)) {
        // This can fail if the PathOps encounter NaN or infinities.
        out->reset();
    }
}

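// CCPR needs instanced draws, integer and flat-interpolation shader support, mappable GPU
// buffers, and a texturable + renderable half-float alpha config for its coverage count atlas.
// Devices that fail any of these checks, or that are blacklisted, fall back to other path
// renderers.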
bool GrCoverageCountingPathRenderer::IsSupported(const GrCaps& caps) {
    const GrShaderCaps& shaderCaps = *caps.shaderCaps();
    return shaderCaps.integerSupport() && shaderCaps.flatInterpolationSupport() &&
           caps.instanceAttribSupport() && GrCaps::kNone_MapFlags != caps.mapBufferFlags() &&
           caps.isConfigTexturable(kAlpha_half_GrPixelConfig) &&
           caps.isConfigRenderable(kAlpha_half_GrPixelConfig, /*withMSAA=*/false) &&
           !caps.blacklistCoverageCounting();
}

sk_sp<GrCoverageCountingPathRenderer> GrCoverageCountingPathRenderer::CreateIfSupported(
        const GrCaps& caps, bool drawCachablePaths) {
    auto ccpr = IsSupported(caps) ? new GrCoverageCountingPathRenderer(drawCachablePaths) : nullptr;
    return sk_sp<GrCoverageCountingPathRenderer>(ccpr);
}

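// CCPR only claims paths that play to its strengths: simple fills (no styling, no inverse fills,
// no perspective, no conics) drawn with coverage AA. Paths that cover a large screen area, or
// that are complex enough to cache better elsewhere, are offered only as a backup in case no
// other renderer takes them.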
GrPathRenderer::CanDrawPath GrCoverageCountingPathRenderer::onCanDrawPath(
        const CanDrawPathArgs& args) const {
    if (args.fShape->hasUnstyledKey() && !fDrawCachablePaths) {
        return CanDrawPath::kNo;
    }

    if (!args.fShape->style().isSimpleFill() || args.fShape->inverseFilled() ||
        args.fViewMatrix->hasPerspective() || GrAAType::kCoverage != args.fAAType) {
        return CanDrawPath::kNo;
    }

    SkPath path;
    args.fShape->asPath(&path);
    if (SkPathPriv::ConicWeightCnt(path)) {
        return CanDrawPath::kNo;
    }

    SkRect devBounds;
    SkIRect devIBounds;
    args.fViewMatrix->mapRect(&devBounds, path.getBounds());
    devBounds.roundOut(&devIBounds);
    if (!devIBounds.intersect(*args.fClipConservativeBounds)) {
        // Path is completely clipped away. Our code will eventually notice this before doing any
        // real work.
        return CanDrawPath::kYes;
    }

    if (devIBounds.height() * devIBounds.width() > 256 * 256) {
        // Large paths can blow up the atlas fast. And they are not ideal for a two-pass rendering
        // algorithm. Give the simpler direct renderers a chance before we commit to drawing it.
        return CanDrawPath::kAsBackup;
    }

    if (args.fShape->hasUnstyledKey() && path.countVerbs() > 50) {
        // Complex paths do better cached in an SDF, if the renderer will accept them.
        return CanDrawPath::kAsBackup;
    }

    return CanDrawPath::kYes;
}

bool GrCoverageCountingPathRenderer::onDrawPath(const DrawPathArgs& args) {
    SkASSERT(!fFlushing);
    auto op = skstd::make_unique<DrawPathsOp>(this, args, args.fPaint.getColor());
    args.fRenderTargetContext->addDrawOp(*args.fClip, std::move(op));
    return true;
}

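// A DrawPathsOp begins life holding a single path draw (fHeadDraw, stored inline). If ops combine,
// additional SingleDraws are chained onto the list via fTailDraw. The actual GPU work is deferred:
// paths are parsed and packed into coverage count atlases during preFlush(), then drawn from the
// atlas in onExecute().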
CCPR::DrawPathsOp::DrawPathsOp(GrCoverageCountingPathRenderer* ccpr, const DrawPathArgs& args,
                               GrColor color)
        : INHERITED(ClassID())
        , fCCPR(ccpr)
        , fSRGBFlags(GrPipeline::SRGBFlagsFromPaint(args.fPaint))
        , fProcessors(std::move(args.fPaint))
        , fTailDraw(&fHeadDraw)
        , fOwningRTPendingPaths(nullptr) {
    SkDEBUGCODE(++fCCPR->fPendingDrawOpsCount);
    SkDEBUGCODE(fBaseInstance = -1);
    SkDEBUGCODE(fInstanceCount = 1);
    SkDEBUGCODE(fNumSkippedInstances = 0);
    GrRenderTargetContext* const rtc = args.fRenderTargetContext;

    SkRect devBounds;
    args.fViewMatrix->mapRect(&devBounds, args.fShape->bounds());
    args.fClip->getConservativeBounds(rtc->width(), rtc->height(), &fHeadDraw.fClipIBounds,
                                      nullptr);
    if (SkTMax(devBounds.height(), devBounds.width()) > kPathCropThreshold) {
        // The path is too large. We need to crop it or analytic AA can run out of fp32 precision.
        SkPath path;
        args.fShape->asPath(&path);
        path.transform(*args.fViewMatrix);
        fHeadDraw.fMatrix.setIdentity();
        crop_path(path, fHeadDraw.fClipIBounds, &fHeadDraw.fPath);
        devBounds = fHeadDraw.fPath.getBounds();
    } else {
        fHeadDraw.fMatrix = *args.fViewMatrix;
        args.fShape->asPath(&fHeadDraw.fPath);
    }
    fHeadDraw.fColor = color;  // Can't call args.fPaint.getColor() because it has been std::move'd.

    // FIXME: intersect with clip bounds to (hopefully) improve batching.
    // (This is nontrivial due to assumptions in generating the octagon cover geometry.)
    this->setBounds(devBounds, GrOp::HasAABloat::kYes, GrOp::IsZeroArea::kNo);
}

CCPR::DrawPathsOp::~DrawPathsOp() {
    if (fOwningRTPendingPaths) {
        // Remove CCPR's dangling pointer to this Op before deleting it.
        fOwningRTPendingPaths->fDrawOps.remove(this);
    }
    SkDEBUGCODE(--fCCPR->fPendingDrawOpsCount);
}

GrDrawOp::RequiresDstTexture CCPR::DrawPathsOp::finalize(const GrCaps& caps,
                                                         const GrAppliedClip* clip,
                                                         GrPixelConfigIsClamped dstIsClamped) {
    SkASSERT(!fCCPR->fFlushing);
    // There should only be one path draw in this Op right now.
    SkASSERT(1 == fInstanceCount);
    SkASSERT(&fHeadDraw == fTailDraw);
    GrProcessorSet::Analysis analysis =
            fProcessors.finalize(fHeadDraw.fColor, GrProcessorAnalysisCoverage::kSingleChannel,
                                 clip, false, caps, dstIsClamped, &fHeadDraw.fColor);
    return analysis.requiresDstTexture() ? RequiresDstTexture::kYes : RequiresDstTexture::kNo;
}

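// Two DrawPathsOps can merge as long as they target the same render target (asserted below) and
// share identical fill type, sRGB flags, and processor sets. Merging splices "that" op's
// SingleDraw list onto the end of ours; the debug instance counts are transferred so the asserts
// in setupResources() and onExecute() stay accurate.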
bool CCPR::DrawPathsOp::onCombineIfPossible(GrOp* op, const GrCaps& caps) {
    DrawPathsOp* that = op->cast<DrawPathsOp>();
    SkASSERT(fCCPR == that->fCCPR);
    SkASSERT(!fCCPR->fFlushing);
    SkASSERT(fOwningRTPendingPaths);
    SkASSERT(fInstanceCount);
    SkASSERT(!that->fOwningRTPendingPaths || that->fOwningRTPendingPaths == fOwningRTPendingPaths);
    SkASSERT(that->fInstanceCount);

    if (this->getFillType() != that->getFillType() || fSRGBFlags != that->fSRGBFlags ||
        fProcessors != that->fProcessors) {
        return false;
    }

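    // Move the contents of "that" op's draw list onto the end of ours. "that" stores its head
    // draw inline, so the head must be copied into the shared allocator; any additional draws are
    // already allocator-backed and can simply be relinked.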
    fTailDraw->fNext = &fOwningRTPendingPaths->fDrawsAllocator.push_back(that->fHeadDraw);
    fTailDraw = (that->fTailDraw == &that->fHeadDraw) ? fTailDraw->fNext : that->fTailDraw;

    this->joinBounds(*that);

    SkDEBUGCODE(fInstanceCount += that->fInstanceCount);
    SkDEBUGCODE(that->fInstanceCount = 0);
    return true;
}

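// Once recorded onto an opList, the op registers itself in CCPR's per-render-target pending paths
// map so preFlush() can find every path destined for that opList.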
void CCPR::DrawPathsOp::wasRecorded(GrRenderTargetOpList* opList) {
    SkASSERT(!fCCPR->fFlushing);
    SkASSERT(!fOwningRTPendingPaths);
    fOwningRTPendingPaths = &fCCPR->fRTPendingPathsMap[opList->uniqueID()];
    fOwningRTPendingPaths->fDrawOps.addToTail(this);
}

bool GrCoverageCountingPathRenderer::canMakeClipProcessor(const SkPath& deviceSpacePath) const {
    if (!fDrawCachablePaths && !deviceSpacePath.isVolatile()) {
        return false;
    }

    if (SkPathPriv::ConicWeightCnt(deviceSpacePath)) {
        return false;
    }

    return true;
}

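// Clip paths are deduplicated by SkPath generation ID within each opList: if the same clip path
// is used by multiple draws, they all share one ClipPath entry (and thus one atlas slot), with
// each subsequent use merely recording its additional access rect.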
std::unique_ptr<GrFragmentProcessor> GrCoverageCountingPathRenderer::makeClipProcessor(
        GrProxyProvider* proxyProvider,
        uint32_t opListID, const SkPath& deviceSpacePath, const SkIRect& accessRect,
        int rtWidth, int rtHeight) {
    using MustCheckBounds = GrCCClipProcessor::MustCheckBounds;

    SkASSERT(!fFlushing);
    SkASSERT(this->canMakeClipProcessor(deviceSpacePath));

    ClipPath& clipPath = fRTPendingPathsMap[opListID].fClipPaths[deviceSpacePath.getGenerationID()];
    if (clipPath.isUninitialized()) {
        // This ClipPath was just created during lookup. Initialize it.
        clipPath.init(proxyProvider, deviceSpacePath, accessRect, rtWidth, rtHeight);
    } else {
        clipPath.addAccess(accessRect);
    }

    bool mustCheckBounds = !clipPath.pathDevIBounds().contains(accessRect);
    return skstd::make_unique<GrCCClipProcessor>(&clipPath, MustCheckBounds(mustCheckBounds),
                                                 deviceSpacePath.getFillType());
}

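// The atlas texture does not exist yet when the clip processor is created, so the ClipPath hands
// out a fully lazy proxy instead. The instantiation callback below runs at flush time, after
// placePathInAtlas() has assigned an atlas; it fetches the atlas texture and derives the
// normalized texel transform (flipping Y for bottom-left-origin targets).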
void CCPR::ClipPath::init(GrProxyProvider* proxyProvider,
                          const SkPath& deviceSpacePath, const SkIRect& accessRect,
                          int rtWidth, int rtHeight) {
    SkASSERT(this->isUninitialized());

    fAtlasLazyProxy = proxyProvider->createFullyLazyProxy(
            [this](GrResourceProvider* resourceProvider, GrSurfaceOrigin* outOrigin) {
                if (!resourceProvider) {
                    return sk_sp<GrTexture>();
                }
                SkASSERT(fHasAtlas);
                SkASSERT(!fHasAtlasTransform);

                GrTextureProxy* textureProxy = fAtlas ? fAtlas->textureProxy() : nullptr;
                if (!textureProxy || !textureProxy->instantiate(resourceProvider)) {
                    fAtlasScale = fAtlasTranslate = {0, 0};
                    SkDEBUGCODE(fHasAtlasTransform = true);
                    return sk_sp<GrTexture>();
                }

                fAtlasScale = {1.f / textureProxy->width(), 1.f / textureProxy->height()};
                fAtlasTranslate = {fAtlasOffsetX * fAtlasScale.x(),
                                   fAtlasOffsetY * fAtlasScale.y()};
                if (kBottomLeft_GrSurfaceOrigin == textureProxy->origin()) {
                    fAtlasScale.fY = -fAtlasScale.y();
                    fAtlasTranslate.fY = 1 - fAtlasTranslate.y();
                }
                SkDEBUGCODE(fHasAtlasTransform = true);

                *outOrigin = textureProxy->origin();
                return sk_ref_sp(textureProxy->priv().peekTexture());
            },
            GrProxyProvider::Renderable::kYes, kAlpha_half_GrPixelConfig);

    const SkRect& pathDevBounds = deviceSpacePath.getBounds();
    if (SkTMax(pathDevBounds.height(), pathDevBounds.width()) > kPathCropThreshold) {
        // The path is too large. We need to crop it or analytic AA can run out of fp32 precision.
        crop_path(deviceSpacePath, SkIRect::MakeWH(rtWidth, rtHeight), &fDeviceSpacePath);
    } else {
        fDeviceSpacePath = deviceSpacePath;
    }
    deviceSpacePath.getBounds().roundOut(&fPathDevIBounds);
    fAccessRect = accessRect;
}

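// preFlush() does all of CCPR's heavy lifting, in four stages:
//   1) Walk every pending draw op and clip path to count paths, points, and verbs, so buffers can
//      be sized up front.
//   2) Allocate the per-flush index/vertex/instance buffers.
//   3) Parse each path's coverage geometry and pack it into one or more atlases, writing per-path
//      instance data along the way.
//   4) Finalize the parser and render the atlases, handing the resulting render target contexts
//      back to the caller.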
void GrCoverageCountingPathRenderer::preFlush(GrOnFlushResourceProvider* onFlushRP,
                                              const uint32_t* opListIDs, int numOpListIDs,
                                              SkTArray<sk_sp<GrRenderTargetContext>>* results) {
    using PathInstance = GrCCPathProcessor::Instance;

    SkASSERT(!fFlushing);
    SkASSERT(!fPerFlushIndexBuffer);
    SkASSERT(!fPerFlushVertexBuffer);
    SkASSERT(!fPerFlushInstanceBuffer);
    SkASSERT(!fPerFlushPathParser);
    SkASSERT(fPerFlushAtlases.empty());
    SkDEBUGCODE(fFlushing = true);

    if (fRTPendingPathsMap.empty()) {
        return;  // Nothing to draw.
    }

    fPerFlushResourcesAreValid = false;

    // Count the paths that are being flushed.
    int maxTotalPaths = 0, maxPathPoints = 0, numSkPoints = 0, numSkVerbs = 0;
    SkDEBUGCODE(int numClipPaths = 0);
    for (int i = 0; i < numOpListIDs; ++i) {
        auto it = fRTPendingPathsMap.find(opListIDs[i]);
        if (fRTPendingPathsMap.end() == it) {
            continue;
        }
        const RTPendingPaths& rtPendingPaths = it->second;

        SkTInternalLList<DrawPathsOp>::Iter drawOpsIter;
        drawOpsIter.init(rtPendingPaths.fDrawOps,
                         SkTInternalLList<DrawPathsOp>::Iter::kHead_IterStart);
        while (DrawPathsOp* op = drawOpsIter.get()) {
            for (const DrawPathsOp::SingleDraw* draw = op->head(); draw; draw = draw->fNext) {
                ++maxTotalPaths;
                maxPathPoints = SkTMax(draw->fPath.countPoints(), maxPathPoints);
                numSkPoints += draw->fPath.countPoints();
                numSkVerbs += draw->fPath.countVerbs();
            }
            drawOpsIter.next();
        }

        maxTotalPaths += rtPendingPaths.fClipPaths.size();
        SkDEBUGCODE(numClipPaths += rtPendingPaths.fClipPaths.size());
        for (const auto& clipsIter : rtPendingPaths.fClipPaths) {
            const SkPath& path = clipsIter.second.deviceSpacePath();
            maxPathPoints = SkTMax(path.countPoints(), maxPathPoints);
            numSkPoints += path.countPoints();
            numSkVerbs += path.countVerbs();
        }
    }

    if (!maxTotalPaths) {
        return;  // Nothing to draw.
    }

    // Allocate GPU buffers.
    fPerFlushIndexBuffer = GrCCPathProcessor::FindIndexBuffer(onFlushRP);
    if (!fPerFlushIndexBuffer) {
        SkDebugf("WARNING: failed to allocate ccpr path index buffer.\n");
        return;
    }

    fPerFlushVertexBuffer = GrCCPathProcessor::FindVertexBuffer(onFlushRP);
    if (!fPerFlushVertexBuffer) {
        SkDebugf("WARNING: failed to allocate ccpr path vertex buffer.\n");
        return;
    }

    fPerFlushInstanceBuffer =
            onFlushRP->makeBuffer(kVertex_GrBufferType, maxTotalPaths * sizeof(PathInstance));
    if (!fPerFlushInstanceBuffer) {
        SkDebugf("WARNING: failed to allocate path instance buffer. No paths will be drawn.\n");
        return;
    }

    PathInstance* pathInstanceData = static_cast<PathInstance*>(fPerFlushInstanceBuffer->map());
    SkASSERT(pathInstanceData);
    int pathInstanceIdx = 0;

    fPerFlushPathParser = sk_make_sp<GrCCPathParser>(maxTotalPaths, maxPathPoints, numSkPoints,
                                                     numSkVerbs);
    SkDEBUGCODE(int skippedTotalPaths = 0);

    // Allocate atlas(es) and fill out GPU instance buffers.
    for (int i = 0; i < numOpListIDs; ++i) {
        auto it = fRTPendingPathsMap.find(opListIDs[i]);
        if (fRTPendingPathsMap.end() == it) {
            continue;
        }
        RTPendingPaths& rtPendingPaths = it->second;

        SkTInternalLList<DrawPathsOp>::Iter drawOpsIter;
        drawOpsIter.init(rtPendingPaths.fDrawOps,
                         SkTInternalLList<DrawPathsOp>::Iter::kHead_IterStart);
        while (DrawPathsOp* op = drawOpsIter.get()) {
            pathInstanceIdx = op->setupResources(onFlushRP, pathInstanceData, pathInstanceIdx);
            drawOpsIter.next();
            SkDEBUGCODE(skippedTotalPaths += op->numSkippedInstances_debugOnly());
        }

        for (auto& clipsIter : rtPendingPaths.fClipPaths) {
            clipsIter.second.placePathInAtlas(this, onFlushRP, fPerFlushPathParser.get());
        }
    }

    fPerFlushInstanceBuffer->unmap();

    SkASSERT(pathInstanceIdx == maxTotalPaths - skippedTotalPaths - numClipPaths);

    if (!fPerFlushAtlases.empty()) {
        auto coverageCountBatchID = fPerFlushPathParser->closeCurrentBatch();
        fPerFlushAtlases.back().setCoverageCountBatchID(coverageCountBatchID);
    }

    if (!fPerFlushPathParser->finalize(onFlushRP)) {
        SkDebugf("WARNING: failed to allocate GPU buffers for CCPR. No paths will be drawn.\n");
        return;
    }

    // Draw the atlas(es).
    GrTAllocator<GrCCAtlas>::Iter atlasIter(&fPerFlushAtlases);
    while (atlasIter.next()) {
        if (auto rtc = atlasIter.get()->finalize(onFlushRP, fPerFlushPathParser)) {
            results->push_back(std::move(rtc));
        }
    }

    fPerFlushResourcesAreValid = true;
}

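// Parses this op's paths into the atlas and writes one PathInstance per non-skipped draw into the
// mapped instance buffer. Consecutive instances that landed in the same atlas are grouped into an
// AtlasBatch so onExecute() can issue a single instanced draw per atlas. Returns the updated
// write index into the instance buffer.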
int CCPR::DrawPathsOp::setupResources(GrOnFlushResourceProvider* onFlushRP,
                                      GrCCPathProcessor::Instance* pathInstanceData,
                                      int pathInstanceIdx) {
    GrCCPathParser* parser = fCCPR->fPerFlushPathParser.get();
    const GrCCAtlas* currentAtlas = nullptr;
    SkASSERT(fInstanceCount > 0);
    SkASSERT(-1 == fBaseInstance);
    fBaseInstance = pathInstanceIdx;

    for (const SingleDraw* draw = this->head(); draw; draw = draw->fNext) {
        // parsePath gives us two tight bounding boxes: one in device space, as well as a second
        // one rotated an additional 45 degrees. The path vertex shader uses these two bounding
        // boxes to generate an octagon that circumscribes the path.
        SkRect devBounds, devBounds45;
        parser->parsePath(draw->fMatrix, draw->fPath, &devBounds, &devBounds45);

        SkIRect devIBounds;
        devBounds.roundOut(&devIBounds);

        int16_t offsetX, offsetY;
        GrCCAtlas* atlas = fCCPR->placeParsedPathInAtlas(onFlushRP, draw->fClipIBounds, devIBounds,
                                                         &offsetX, &offsetY);
        if (!atlas) {
            SkDEBUGCODE(++fNumSkippedInstances);
            continue;
        }
        if (currentAtlas != atlas) {
            if (currentAtlas) {
                this->addAtlasBatch(currentAtlas, pathInstanceIdx);
            }
            currentAtlas = atlas;
        }

        const SkMatrix& m = draw->fMatrix;
        pathInstanceData[pathInstanceIdx++] = {
                devBounds,
                devBounds45,
                {{m.getScaleX(), m.getSkewY(), m.getSkewX(), m.getScaleY()}},
                {{m.getTranslateX(), m.getTranslateY()}},
                {{offsetX, offsetY}},
                draw->fColor};
    }

    SkASSERT(pathInstanceIdx == fBaseInstance + fInstanceCount - fNumSkippedInstances);
    if (currentAtlas) {
        this->addAtlasBatch(currentAtlas, pathInstanceIdx);
    }

    return pathInstanceIdx;
}

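// Clip paths take the same atlas route as draws, minus the instance data: the path is parsed,
// placed in an atlas, and the resulting atlas and offset are recorded for the lazy proxy callback
// in ClipPath::init() to pick up.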
void CCPR::ClipPath::placePathInAtlas(GrCoverageCountingPathRenderer* ccpr,
                                      GrOnFlushResourceProvider* onFlushRP,
                                      GrCCPathParser* parser) {
    SkASSERT(!this->isUninitialized());
    SkASSERT(!fHasAtlas);
    parser->parseDeviceSpacePath(fDeviceSpacePath);
    fAtlas = ccpr->placeParsedPathInAtlas(onFlushRP, fAccessRect, fPathDevIBounds, &fAtlasOffsetX,
                                          &fAtlasOffsetY);
    SkDEBUGCODE(fHasAtlas = true);
}

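// Reserves atlas space for the most recently parsed path. Paths that fit entirely inside their
// clip bounds can skip the scissor; otherwise the path rect is clipped and rendered scissored. If
// the clip and path don't intersect at all, the parsed path is discarded and null is returned.
// When the current atlas can't fit the rect, its coverage count batch is closed and a new atlas
// is started.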
GrCCAtlas* GrCoverageCountingPathRenderer::placeParsedPathInAtlas(
        GrOnFlushResourceProvider* onFlushRP,
        const SkIRect& clipIBounds,
        const SkIRect& pathIBounds,
        int16_t* atlasOffsetX,
        int16_t* atlasOffsetY) {
    using ScissorMode = GrCCPathParser::ScissorMode;

    ScissorMode scissorMode;
    SkIRect clippedPathIBounds;
    if (clipIBounds.contains(pathIBounds)) {
        clippedPathIBounds = pathIBounds;
        scissorMode = ScissorMode::kNonScissored;
    } else if (clippedPathIBounds.intersect(clipIBounds, pathIBounds)) {
        scissorMode = ScissorMode::kScissored;
    } else {
        fPerFlushPathParser->discardParsedPath();
        return nullptr;
    }

    SkIPoint16 atlasLocation;
    int h = clippedPathIBounds.height(), w = clippedPathIBounds.width();
    if (fPerFlushAtlases.empty() || !fPerFlushAtlases.back().addRect(w, h, &atlasLocation)) {
        if (!fPerFlushAtlases.empty()) {
            // The atlas is out of room and can't grow any bigger.
            auto coverageCountBatchID = fPerFlushPathParser->closeCurrentBatch();
            fPerFlushAtlases.back().setCoverageCountBatchID(coverageCountBatchID);
        }
        fPerFlushAtlases.emplace_back(*onFlushRP->caps(), w, h).addRect(w, h, &atlasLocation);
    }

    *atlasOffsetX = atlasLocation.x() - static_cast<int16_t>(clippedPathIBounds.left());
    *atlasOffsetY = atlasLocation.y() - static_cast<int16_t>(clippedPathIBounds.top());
    fPerFlushPathParser->saveParsedPath(scissorMode, clippedPathIBounds, *atlasOffsetX,
                                        *atlasOffsetY);

    return &fPerFlushAtlases.back();
}

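// By the time onExecute() runs, the atlases have already been rendered via the render target
// contexts returned from preFlush(). All that's left is to issue one instanced draw per atlas
// batch, sampling coverage from the atlas texture.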
void CCPR::DrawPathsOp::onExecute(GrOpFlushState* flushState) {
    SkASSERT(fCCPR->fFlushing);
    SkASSERT(flushState->rtCommandBuffer());

    if (!fCCPR->fPerFlushResourcesAreValid) {
        return;  // Setup failed.
    }

    SkASSERT(fBaseInstance >= 0);  // Make sure setupResources has been called.

    GrPipeline::InitArgs initArgs;
    initArgs.fFlags = fSRGBFlags;
    initArgs.fProxy = flushState->drawOpArgs().fProxy;
    initArgs.fCaps = &flushState->caps();
    initArgs.fResourceProvider = flushState->resourceProvider();
    initArgs.fDstProxy = flushState->drawOpArgs().fDstProxy;
    GrPipeline pipeline(initArgs, std::move(fProcessors), flushState->detachAppliedClip());

    int baseInstance = fBaseInstance;

    for (int i = 0; i < fAtlasBatches.count(); baseInstance = fAtlasBatches[i++].fEndInstanceIdx) {
        const AtlasBatch& batch = fAtlasBatches[i];
        SkASSERT(batch.fEndInstanceIdx > baseInstance);

        if (!batch.fAtlas->textureProxy()) {
            continue;  // Atlas failed to allocate.
        }

        GrCCPathProcessor pathProc(flushState->resourceProvider(),
                                   sk_ref_sp(batch.fAtlas->textureProxy()), this->getFillType());

        GrMesh mesh(GrCCPathProcessor::MeshPrimitiveType(flushState->caps()));
        mesh.setIndexedInstanced(fCCPR->fPerFlushIndexBuffer.get(),
                                 GrCCPathProcessor::NumIndicesPerInstance(flushState->caps()),
                                 fCCPR->fPerFlushInstanceBuffer.get(),
                                 batch.fEndInstanceIdx - baseInstance, baseInstance);
        mesh.setVertexData(fCCPR->fPerFlushVertexBuffer.get());

        flushState->rtCommandBuffer()->draw(pipeline, pathProc, &mesh, nullptr, 1, this->bounds());
    }

    SkASSERT(baseInstance == fBaseInstance + fInstanceCount - fNumSkippedInstances);
}

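// Tears down all per-flush state. The pending paths map entries are erased last, and only for the
// opLists that actually flushed, because Ops and clip FPs may still reference their data up until
// this point.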
void GrCoverageCountingPathRenderer::postFlush(GrDeferredUploadToken, const uint32_t* opListIDs,
                                               int numOpListIDs) {
    SkASSERT(fFlushing);
    fPerFlushAtlases.reset();
    fPerFlushPathParser.reset();
    fPerFlushInstanceBuffer.reset();
    fPerFlushVertexBuffer.reset();
    fPerFlushIndexBuffer.reset();
    // We wait to erase these until after flush, once Ops and FPs are done accessing their data.
    for (int i = 0; i < numOpListIDs; ++i) {
        fRTPendingPathsMap.erase(opListIDs[i]);
    }
    SkDEBUGCODE(fFlushing = false);
}
    561