/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
      8 #ifndef GrOpFlushState_DEFINED
      9 #define GrOpFlushState_DEFINED
     10 
     11 #include <utility>
     12 #include "GrAppliedClip.h"
     13 #include "GrBufferAllocPool.h"
     14 #include "GrDeferredUpload.h"
     15 #include "SkArenaAlloc.h"
     16 #include "SkArenaAllocList.h"
     17 #include "ops/GrMeshDrawOp.h"
     18 
     19 class GrGpu;
     20 class GrGpuCommandBuffer;
     21 class GrGpuRTCommandBuffer;
     22 class GrResourceProvider;
     23 
     24 /** Tracks the state across all the GrOps (really just the GrDrawOps) in a GrOpList flush. */
     25 class GrOpFlushState final : public GrDeferredUploadTarget, public GrMeshDrawOp::Target {
     26 public:
     27     GrOpFlushState(GrGpu*, GrResourceProvider*, GrTokenTracker*);
     28 
     29     ~GrOpFlushState() final { this->reset(); }
     30 
     31     /** This is called after each op has a chance to prepare its draws and before the draws are
     32         executed. */
     33     void preExecuteDraws();
     34 
     35     void doUpload(GrDeferredTextureUploadFn&);
     36 
     37     /** Called as ops are executed. Must be called in the same order as the ops were prepared. */
     38     void executeDrawsAndUploadsForMeshDrawOp(uint32_t opID, const SkRect& opBounds);
     39 
     40     GrGpuCommandBuffer* commandBuffer() { return fCommandBuffer; }
     41     // Helper function used by Ops that are only called via RenderTargetOpLists
     42     GrGpuRTCommandBuffer* rtCommandBuffer();
     43     void setCommandBuffer(GrGpuCommandBuffer* buffer) { fCommandBuffer = buffer; }
     44 
     45     GrGpu* gpu() { return fGpu; }
     46 
     47     void reset();
     48 
     49     /** Additional data required on a per-op basis when executing GrOps. */
     50     struct OpArgs {
     51         GrRenderTarget* renderTarget() const { return fProxy->priv().peekRenderTarget(); }
     52 
     53         GrOp* fOp;
     54         // TODO: do we still need the dst proxy here?
     55         GrRenderTargetProxy* fProxy;
     56         GrAppliedClip* fAppliedClip;
     57         GrXferProcessor::DstProxy fDstProxy;
     58     };
     59 
     60     void setOpArgs(OpArgs* opArgs) { fOpArgs = opArgs; }
     61 
     62     const OpArgs& drawOpArgs() const {
     63         SkASSERT(fOpArgs);
     64         SkASSERT(fOpArgs->fOp);
     65         return *fOpArgs;
     66     }
     67 
     68     /** Overrides of GrDeferredUploadTarget. */
     69 
     70     const GrTokenTracker* tokenTracker() final { return fTokenTracker; }
     71     GrDeferredUploadToken addInlineUpload(GrDeferredTextureUploadFn&&) final;
     72     GrDeferredUploadToken addASAPUpload(GrDeferredTextureUploadFn&&) final;
     73 
     74     /** Overrides of GrMeshDrawOp::Target. */
     75 
     76     void draw(const GrGeometryProcessor*, const GrPipeline*, const GrMesh&) final;
     77     void* makeVertexSpace(size_t vertexSize, int vertexCount, const GrBuffer**,
     78                           int* startVertex) final;
     79     uint16_t* makeIndexSpace(int indexCount, const GrBuffer**, int* startIndex) final;
     80     void* makeVertexSpaceAtLeast(size_t vertexSize, int minVertexCount, int fallbackVertexCount,
     81                                  const GrBuffer**, int* startVertex, int* actualVertexCount) final;
     82     uint16_t* makeIndexSpaceAtLeast(int minIndexCount, int fallbackIndexCount, const GrBuffer**,
     83                                     int* startIndex, int* actualIndexCount) final;
     84     void putBackIndices(int indexCount) final;
     85     void putBackVertices(int vertices, size_t vertexStride) final;
     86     GrRenderTargetProxy* proxy() const final { return fOpArgs->fProxy; }
     87     GrAppliedClip detachAppliedClip() final;
     88     const GrXferProcessor::DstProxy& dstProxy() const final { return fOpArgs->fDstProxy; }
     89     GrDeferredUploadTarget* deferredUploadTarget() final { return this; }
     90     const GrCaps& caps() const final;
     91     GrResourceProvider* resourceProvider() const final { return fResourceProvider; }
     92 
     93 private:
     94     /** GrMeshDrawOp::Target override. */
     95     SkArenaAlloc* pipelineArena() override { return &fArena; }
     96 
     97     struct InlineUpload {
     98         InlineUpload(GrDeferredTextureUploadFn&& upload, GrDeferredUploadToken token)
     99                 : fUpload(std::move(upload)), fUploadBeforeToken(token) {}
    100         GrDeferredTextureUploadFn fUpload;
    101         GrDeferredUploadToken fUploadBeforeToken;
    102     };
    103 
    104     // A set of contiguous draws that share a draw token, geometry processor, and pipeline. The
    105     // meshes for the draw are stored in the fMeshes array. The reason for coalescing meshes
    106     // that share a geometry processor into a Draw is that it allows the Gpu object to setup
    107     // the shared state once and then issue draws for each mesh.
    108     struct Draw {
    109         int fMeshCnt = 0;
    110         GrPendingProgramElement<const GrGeometryProcessor> fGeometryProcessor;
    111         const GrPipeline* fPipeline;
    112         uint32_t fOpID;
    113     };
    114 
    115     // Storage for ops' pipelines, draws, and inline uploads.
    116     SkArenaAlloc fArena{sizeof(GrPipeline) * 100};
    117 
    118     // Store vertex and index data on behalf of ops that are flushed.
    119     GrVertexBufferAllocPool fVertexPool;
    120     GrIndexBufferAllocPool fIndexPool;
    121 
    122     // Data stored on behalf of the ops being flushed.
    123     SkArenaAllocList<GrDeferredTextureUploadFn> fASAPUploads;
    124     SkArenaAllocList<InlineUpload> fInlineUploads;
    125     SkArenaAllocList<Draw> fDraws;
    126     // TODO: These should go in the arena. However, GrGpuCommandBuffer and other classes currently
    127     // accept contiguous arrays of meshes.
    128     SkSTArray<16, GrMesh> fMeshes;
    129 
    130     // All draws we store have an implicit draw token. This is the draw token for the first draw
    131     // in fDraws.
    132     GrDeferredUploadToken fBaseDrawToken = GrDeferredUploadToken::AlreadyFlushedToken();
    133 
    134     // Info about the op that is currently preparing or executing using the flush state or null if
    135     // an op is not currently preparing of executing.
    136     OpArgs* fOpArgs = nullptr;
    137 
    138     GrGpu* fGpu;
    139     GrResourceProvider* fResourceProvider;
    140     GrTokenTracker* fTokenTracker;
    141     GrGpuCommandBuffer* fCommandBuffer = nullptr;
    142 
    143     // Variables that are used to track where we are in lists as ops are executed
    144     SkArenaAllocList<Draw>::Iter fCurrDraw;
    145     int fCurrMesh;
    146     SkArenaAllocList<InlineUpload>::Iter fCurrUpload;
    147 };
    148 
    149 #endif
    150