/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrGpu_DEFINED
#define GrGpu_DEFINED

#include "GrDrawTarget.h"
#include "GrPathRendering.h"
#include "GrProgramDesc.h"
#include "SkPath.h"

class GrContext;
class GrNonInstancedVertices;
class GrPath;
class GrPathRange;
class GrPathRenderer;
class GrPathRendererChain;
class GrPipeline;
class GrPrimitiveProcessor;
class GrStencilAttachment;
class GrVertices;

class GrGpu : public SkRefCnt {
public:
    /**
     * Create an instance of GrGpu that matches the specified backend. If the requested backend is
     * not supported (at compile-time or run-time) this returns NULL. The context will not be
     * fully constructed and should not be used by GrGpu until after this function returns.
     */
    static GrGpu* Create(GrBackend, GrBackendContext, GrContext* context);

    ////////////////////////////////////////////////////////////////////////////

    GrGpu(GrContext* context);
    ~GrGpu() override;

    GrContext* getContext() { return fContext; }
    const GrContext* getContext() const { return fContext; }

    /**
     * Gets the capabilities of the draw target.
     */
    const GrDrawTargetCaps* caps() const { return fCaps.get(); }

    GrPathRendering* pathRendering() { return fPathRendering.get(); }

    // Called by GrContext when the underlying backend context has been destroyed.
    // GrGpu should use this to ensure that no backend API calls will be made from
    // here onward, including in its destructor. Subclasses should call
    // INHERITED::contextAbandoned() if they override this.
    virtual void contextAbandoned();

    /**
     * The GrGpu object normally assumes that no outsider is setting state
     * within the underlying 3D API's context/device/whatever. This call informs
     * the GrGpu that the state was modified and it shouldn't make assumptions
     * about the state.
     */
    void markContextDirty(uint32_t state = kAll_GrBackendState) { fResetBits |= state; }

    /**
     * Creates a texture object. If kRenderTarget_GrSurfaceFlag is set, the texture
     * can be used as a render target by calling GrTexture::asRenderTarget(). Not all
     * pixel configs can be used as render targets. Support for configs as textures
     * or render targets can be checked using GrDrawTargetCaps.
     *
     * @param desc        describes the texture to be created.
     * @param budgeted    does this texture count against the resource cache budget?
     * @param srcData     texel data to load the texture with. Begins with full-size
     *                    palette data for paletted textures. For compressed
     *                    formats it contains the compressed pixel data. Otherwise,
     *                    it contains width*height texels. If NULL, the texture's
     *                    data is uninitialized.
     * @param rowBytes    the number of bytes between consecutive rows. Zero
     *                    means rows are tightly packed. This field is ignored
     *                    for compressed formats.
     *
     * @return    The texture object if successful, otherwise NULL.
     */
    GrTexture* createTexture(const GrSurfaceDesc& desc, bool budgeted,
                             const void* srcData, size_t rowBytes);

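    // A minimal usage sketch (illustrative only; the GrSurfaceDesc field names and
    // kRGBA_8888_GrPixelConfig below are assumptions about the surrounding API):
    //
    //     GrSurfaceDesc desc;
    //     desc.fFlags = kRenderTarget_GrSurfaceFlag;
    //     desc.fWidth = 256;
    //     desc.fHeight = 256;
    //     desc.fConfig = kRGBA_8888_GrPixelConfig;
    //     SkAutoTUnref<GrTexture> texture(gpu->createTexture(desc, true, NULL, 0));
    //     if (!texture) {
    //         // config not supported as a texture/render target, or allocation failed
    //     }
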
    /**
     * Implements GrContext::wrapBackendTexture
     */
    GrTexture* wrapBackendTexture(const GrBackendTextureDesc&);

    /**
     * Implements GrContext::wrapBackendRenderTarget
     */
    GrRenderTarget* wrapBackendRenderTarget(const GrBackendRenderTargetDesc&);

    /**
     * Creates a vertex buffer.
     *
     * @param size    size in bytes of the vertex buffer
     * @param dynamic hints whether the data will be frequently changed
     *                by either GrVertexBuffer::map() or
     *                GrVertexBuffer::updateData().
     *
     * @return    The vertex buffer if successful, otherwise NULL.
     */
    GrVertexBuffer* createVertexBuffer(size_t size, bool dynamic);

    /**
     * Creates an index buffer.
     *
     * @param size    size in bytes of the index buffer
     * @param dynamic hints whether the data will be frequently changed
     *                by either GrIndexBuffer::map() or
     *                GrIndexBuffer::updateData().
     *
     * @return The index buffer if successful, otherwise NULL.
     */
    GrIndexBuffer* createIndexBuffer(size_t size, bool dynamic);

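    // A minimal sketch of buffer creation (illustrative only; the sizes are arbitrary and the
    // SkAutoTUnref wrappers are an assumption about how a caller manages the returned refs):
    //
    //     SkAutoTUnref<GrVertexBuffer> vb(gpu->createVertexBuffer(4 * sizeof(SkPoint), false));
    //     SkAutoTUnref<GrIndexBuffer>  ib(gpu->createIndexBuffer(6 * sizeof(uint16_t), false));
    //     if (!vb || !ib) {
    //         // allocation failed; caller must handle NULL
    //     }
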
    /**
     * Resolves MSAA.
     */
    void resolveRenderTarget(GrRenderTarget* target);

    /**
     * Gets a preferred 8888 config to use for writing/reading pixel data to/from a surface with
     * config surfaceConfig. The returned config must have at least as many bits per channel as the
     * readConfig or writeConfig param.
     */
    virtual GrPixelConfig preferredReadPixelsConfig(GrPixelConfig readConfig,
                                                    GrPixelConfig surfaceConfig) const {
        return readConfig;
    }
    virtual GrPixelConfig preferredWritePixelsConfig(GrPixelConfig writeConfig,
                                                     GrPixelConfig surfaceConfig) const {
        return writeConfig;
    }

    /**
     * Called before writing pixels to a GrTexture when the src pixel config doesn't
     * match the texture's config.
     */
    virtual bool canWriteTexturePixels(const GrTexture*, GrPixelConfig srcConfig) const = 0;

    /**
     * OpenGL's readPixels returns the result bottom-to-top while the Skia
     * API is top-to-bottom. Thus we have to do a y-axis flip. The obvious
     * solution is to have the subclass do the flip using either the CPU or GPU.
     * However, the caller (GrContext) may have transformations to apply and can
     * simply fold in the y-flip for free. On the other hand, the subclass may
     * be able to do it for free itself. For example, the subclass may have to
     * do memcpys to handle rowBytes that aren't tight. It could do the y-flip
     * at the same time.
     *
     * This function returns true if a y-flip is required to put the pixels in
     * top-to-bottom order and the subclass cannot do it for free.
     *
     * See readPixels for the params.
     * @return true if calling readPixels with the same set of params will
     *              produce bottom-to-top data
     */
    virtual bool readPixelsWillPayForYFlip(GrRenderTarget* renderTarget,
                                           int left, int top,
                                           int width, int height,
                                           GrPixelConfig config,
                                           size_t rowBytes) const = 0;

    /**
     * This should return true if reading an NxM rectangle of pixels from a
     * render target is faster if the target has dimensions N and M and the read
     * rectangle has its top-left at 0,0.
     */
    virtual bool fullReadPixelsIsFasterThanPartial() const { return false; }

    /**
     * Reads a rectangle of pixels from a render target.
     *
     * @param renderTarget  the render target to read from. NULL means the
     *                      current render target.
     * @param left          left edge of the rectangle to read (inclusive)
     * @param top           top edge of the rectangle to read (inclusive)
     * @param width         width of rectangle to read in pixels.
     * @param height        height of rectangle to read in pixels.
     * @param config        the pixel config of the destination buffer
     * @param buffer        memory to read the rectangle into.
     * @param rowBytes      the number of bytes between consecutive rows. Zero
     *                      means rows are tightly packed.
     *
     * @return true if the read succeeded, false if not. The read can fail
     *              because of an unsupported pixel config or because no render
     *              target is currently set.
     */
    bool readPixels(GrRenderTarget* renderTarget,
                    int left, int top, int width, int height,
                    GrPixelConfig config, void* buffer, size_t rowBytes);

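    // A minimal readback sketch (illustrative only; assumes a tightly packed 8888 readback
    // into caller-owned storage and that SkAutoMalloc is visible to the caller):
    //
    //     const int w = rt->width(), h = rt->height();
    //     SkAutoMalloc pixels(w * h * 4);
    //     if (!gpu->readPixels(rt, 0, 0, w, h, kRGBA_8888_GrPixelConfig, pixels.get(), 0)) {
    //         // unsupported config or no render target bound
    //     }
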
    /**
     * Updates the pixels in a rectangle of a texture.
     *
     * @param texture       the texture to write to.
     * @param left          left edge of the rectangle to write (inclusive)
     * @param top           top edge of the rectangle to write (inclusive)
     * @param width         width of rectangle to write in pixels.
     * @param height        height of rectangle to write in pixels.
     * @param config        the pixel config of the source buffer
     * @param buffer        memory to read pixels from
     * @param rowBytes      number of bytes between consecutive rows. Zero
     *                      means rows are tightly packed.
     *
     * @return true if the write succeeded, false if not.
     */
    bool writeTexturePixels(GrTexture* texture,
                            int left, int top, int width, int height,
                            GrPixelConfig config, const void* buffer,
                            size_t rowBytes);

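    // A minimal upload sketch (illustrative only; assumes srcPixels matches the texture's
    // config, otherwise canWriteTexturePixels() should be consulted first):
    //
    //     if (!gpu->writeTexturePixels(texture, 0, 0, texture->width(), texture->height(),
    //                                  texture->config(), srcPixels, 0)) {
    //         // config mismatch or backend-specific failure
    //     }
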
    /**
     * Clears the passed in render target. Ignores the draw state and clip. Clears the whole render
     * target if rect is NULL, otherwise just the rect. If canIgnoreRect is set then the entire
     * render target may be cleared even when a rect is provided.
     */
    void clear(const SkIRect* rect, GrColor color, bool canIgnoreRect, GrRenderTarget* renderTarget);

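    // A minimal sketch (illustrative only; renderTarget and dirtyRect are caller-provided and
    // GrColor_WHITE is assumed to be one of the available GrColor constants):
    //
    //     gpu->clear(NULL, GrColor_WHITE, true, renderTarget);        // whole-target clear
    //     gpu->clear(&dirtyRect, GrColor_WHITE, false, renderTarget); // clear just dirtyRect
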

    void clearStencilClip(const SkIRect& rect, bool insideClip, GrRenderTarget* renderTarget);

    /**
     * Discards the contents of the render target. NULL indicates that the current render target
     * should be discarded.
     */
    virtual void discard(GrRenderTarget* = NULL) = 0;

    /**
     * This can be called before allocating a texture to be a dst for copySurface. It will
     * populate the origin, config, and flags fields of the desc such that copySurface can
     * efficiently succeed. It should only succeed if it can allow copySurface to perform a copy
     * that would be more efficient than drawing the src to a dst render target.
     */
    virtual bool initCopySurfaceDstDesc(const GrSurface* src, GrSurfaceDesc* desc) = 0;

    // After the client interacts directly with the 3D context state the GrGpu
    // must resync its internal state and assumptions about the 3D context state.
    // Each time this occurs the GrGpu bumps a timestamp.
    // At 10 resets / frame and 60fps a 64-bit timestamp will overflow in about
    // a billion years.
    typedef uint64_t ResetTimestamp;

    // This timestamp is always older than the current timestamp.
    static const ResetTimestamp kExpiredTimestamp = 0;
    // Returns a timestamp based on the number of times the context was reset.
    // This timestamp can be used to lazily detect when cached 3D context state
    // is dirty.
    ResetTimestamp getResetTimestamp() const { return fResetTimestamp; }

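    // A sketch of the lazy dirty-check pattern this enables (illustrative only; fLastReset is
    // a hypothetical field on the caller's cached-state object):
    //
    //     if (cachedState.fLastReset != gpu->getResetTimestamp()) {
    //         cachedState.invalidate();                       // drop stale assumptions
    //         cachedState.fLastReset = gpu->getResetTimestamp();
    //     }
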
    virtual void buildProgramDesc(GrProgramDesc*,
                                  const GrPrimitiveProcessor&,
                                  const GrPipeline&,
                                  const GrBatchTracker&) const = 0;

    // Called to determine whether a copySurface call would succeed or not. Derived
    // classes must keep this consistent with their implementation of onCopySurface(). Fallbacks
    // to issuing a draw from the src to dst take place at the GrDrawTarget level and this function
    // should only return true if a faster copy path exists. The rect and point are pre-clipped. The
    // src rect and implied dst rect are guaranteed to be within the src/dst bounds and non-empty.
    virtual bool canCopySurface(const GrSurface* dst,
                                const GrSurface* src,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint) = 0;

    // Called to perform a surface-to-surface copy. Fallbacks to issuing a draw from the src to dst
    // take place at the GrDrawTarget level and this function implements the faster copy paths. The
    // rect and point are pre-clipped. The src rect and implied dst rect are guaranteed to be within
    // the src/dst bounds and non-empty.
    virtual bool copySurface(GrSurface* dst,
                             GrSurface* src,
                             const SkIRect& srcRect,
                             const SkIPoint& dstPoint) = 0;

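    // A minimal sketch of the intended calling pattern (illustrative only; the draw fallback
    // in the else-branch is pseudocode for what happens at the GrDrawTarget level):
    //
    //     if (gpu->canCopySurface(dst, src, srcRect, dstPoint) &&
    //         gpu->copySurface(dst, src, srcRect, dstPoint)) {
    //         // fast backend copy succeeded
    //     } else {
    //         // fall back to drawing src into dst
    //     }
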
    // Called before certain draws in order to guarantee coherent results from dst reads.
    virtual void xferBarrier(GrRenderTarget*, GrXferBarrierType) = 0;

    struct DrawArgs {
        DrawArgs(const GrPrimitiveProcessor* primProc,
                 const GrPipeline* pipeline,
                 const GrProgramDesc* desc,
                 const GrBatchTracker* batchTracker)
            : fPrimitiveProcessor(primProc)
            , fPipeline(pipeline)
            , fDesc(desc)
            , fBatchTracker(batchTracker) {
            SkASSERT(primProc && pipeline && desc && batchTracker);
        }
        const GrPrimitiveProcessor* fPrimitiveProcessor;
        const GrPipeline* fPipeline;
        const GrProgramDesc* fDesc;
        const GrBatchTracker* fBatchTracker;
    };

    void draw(const DrawArgs&, const GrVertices&);

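    // A minimal sketch of issuing a draw (illustrative only; primProc, pipeline, programDesc,
    // batchTracker, and vertices stand in for objects the caller has already built):
    //
    //     DrawArgs args(&primProc, &pipeline, &programDesc, &batchTracker);
    //     gpu->draw(args, vertices);
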
    /** None of these params are optional; pointers are used just to avoid making copies. */
    struct StencilPathState {
        bool fUseHWAA;
        GrRenderTarget* fRenderTarget;
        const SkMatrix* fViewMatrix;
        const GrStencilSettings* fStencil;
        const GrScissorState* fScissor;
    };

    void stencilPath(const GrPath*, const StencilPathState&);

    void drawPath(const DrawArgs&, const GrPath*, const GrStencilSettings&);
    void drawPaths(const DrawArgs&,
                   const GrPathRange*,
                   const void* indices,
                   GrDrawTarget::PathIndexType,
                   const float transformValues[],
                   GrDrawTarget::PathTransformType,
                   int count,
                   const GrStencilSettings&);

    ///////////////////////////////////////////////////////////////////////////
    // Debugging and Stats

    class Stats {
    public:
#if GR_GPU_STATS
        Stats() { this->reset(); }

        void reset() {
            fRenderTargetBinds = 0;
            fShaderCompilations = 0;
            fTextureCreates = 0;
            fTextureUploads = 0;
            fStencilAttachmentCreates = 0;
        }

        int renderTargetBinds() const { return fRenderTargetBinds; }
        void incRenderTargetBinds() { fRenderTargetBinds++; }
        int shaderCompilations() const { return fShaderCompilations; }
        void incShaderCompilations() { fShaderCompilations++; }
        int textureCreates() const { return fTextureCreates; }
        void incTextureCreates() { fTextureCreates++; }
        int textureUploads() const { return fTextureUploads; }
        void incTextureUploads() { fTextureUploads++; }
        void incStencilAttachmentCreates() { fStencilAttachmentCreates++; }
        void dump(SkString*);

    private:
        int fRenderTargetBinds;
        int fShaderCompilations;
        int fTextureCreates;
        int fTextureUploads;
        int fStencilAttachmentCreates;
#else
        void dump(SkString*) {}
        void incRenderTargetBinds() {}
        void incShaderCompilations() {}
        void incTextureCreates() {}
        void incTextureUploads() {}
        void incStencilAttachmentCreates() {}
#endif
    };

    Stats* stats() { return &fStats; }

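    // A minimal sketch of reading the stats out (illustrative only; only meaningful when
    // GR_GPU_STATS is enabled, otherwise dump() is a no-op):
    //
    //     SkString statsStr;
    //     gpu->stats()->dump(&statsStr);
    //     SkDebugf("%s\n", statsStr.c_str());
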
    /**
     * Called at the start and end of gpu trace marking.
     * GR_CREATE_GPU_TRACE_MARKER(marker_str, target) will automatically call these at the start
     * and end of a code block, respectively.
     */
    void addGpuTraceMarker(const GrGpuTraceMarker* marker);
    void removeGpuTraceMarker(const GrGpuTraceMarker* marker);

    /**
     * Takes the current active set of markers and stores them for later use. Every marker in
     * the active set is removed from the active set and the target's remove function is called.
     * These functions do not work as a stack, so you cannot call save a second time before calling
     * restore. Also, it is assumed that when restore is called the current active set of markers
     * is empty. When the stored markers are added back into the active set, the target's add
     * function is called.
     */
    void saveActiveTraceMarkers();
    void restoreActiveTraceMarkers();

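    // A minimal sketch of the save/restore contract (illustrative only; note the calls do not
    // nest, so each save must be matched by exactly one restore before the next save):
    //
    //     gpu->saveActiveTraceMarkers();
    //     // ... issue work that should not inherit the saved markers ...
    //     gpu->restoreActiveTraceMarkers();
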
    // Given a render target, finds or creates a stencil attachment and attaches it.
    bool attachStencilAttachmentToRenderTarget(GrRenderTarget* target);

protected:
    // Functions used to map clip-respecting stencil tests into normal
    // stencil funcs supported by GPUs.
    static GrStencilFunc ConvertStencilFunc(bool stencilInClip,
                                            GrStencilFunc func);
    static void ConvertStencilFuncAndMask(GrStencilFunc func,
                                          bool clipInStencil,
                                          unsigned int clipBit,
                                          unsigned int userBits,
                                          unsigned int* ref,
                                          unsigned int* mask);

    const GrTraceMarkerSet& getActiveTraceMarkers() const { return fActiveTraceMarkers; }

    Stats                                   fStats;
    SkAutoTDelete<GrPathRendering>          fPathRendering;
    // Subclass must initialize this in its constructor.
    SkAutoTUnref<const GrDrawTargetCaps>    fCaps;

private:
    // Called when the 3D context state is unknown. The subclass should emit any
    // assumed 3D context state and dirty any state cache.
    virtual void onResetContext(uint32_t resetBits) = 0;

    // Overridden by the backend-specific derived class to create objects.
    // Texture size and sample size will have already been validated in the base class before
    // onCreateTexture/onCreateCompressedTexture are called.
    virtual GrTexture* onCreateTexture(const GrSurfaceDesc& desc,
                                       GrGpuResource::LifeCycle lifeCycle,
                                       const void* srcData, size_t rowBytes) = 0;
    virtual GrTexture* onCreateCompressedTexture(const GrSurfaceDesc& desc,
                                                 GrGpuResource::LifeCycle lifeCycle,
                                                 const void* srcData) = 0;
    virtual GrTexture* onWrapBackendTexture(const GrBackendTextureDesc&) = 0;
    virtual GrRenderTarget* onWrapBackendRenderTarget(const GrBackendRenderTargetDesc&) = 0;
    virtual GrVertexBuffer* onCreateVertexBuffer(size_t size, bool dynamic) = 0;
    virtual GrIndexBuffer* onCreateIndexBuffer(size_t size, bool dynamic) = 0;

    // Overridden by the backend-specific derived class to perform the clear.
    virtual void onClear(GrRenderTarget*, const SkIRect* rect, GrColor color,
                         bool canIgnoreRect) = 0;

    // Overridden by backend-specific classes to perform a clear of the stencil clip bits. This is
    // ONLY used by the clip target.
    virtual void onClearStencilClip(GrRenderTarget*, const SkIRect& rect, bool insideClip) = 0;

    // Overridden by the backend-specific derived class to perform the draw call.
    virtual void onDraw(const DrawArgs&, const GrNonInstancedVertices&) = 0;
    virtual void onStencilPath(const GrPath*, const StencilPathState&) = 0;

    virtual void onDrawPath(const DrawArgs&, const GrPath*, const GrStencilSettings&) = 0;
    virtual void onDrawPaths(const DrawArgs&,
                             const GrPathRange*,
                             const void* indices,
                             GrDrawTarget::PathIndexType,
                             const float transformValues[],
                             GrDrawTarget::PathTransformType,
                             int count,
                             const GrStencilSettings&) = 0;

    // Overridden by the backend-specific derived class to perform the read pixels.
    virtual bool onReadPixels(GrRenderTarget* target,
                              int left, int top, int width, int height,
                              GrPixelConfig,
                              void* buffer,
                              size_t rowBytes) = 0;

    // Overridden by the backend-specific derived class to perform the texture update.
    virtual bool onWriteTexturePixels(GrTexture* texture,
                                      int left, int top, int width, int height,
                                      GrPixelConfig config, const void* buffer,
                                      size_t rowBytes) = 0;

    // Overridden by the backend-specific derived class to perform the resolve.
    virtual void onResolveRenderTarget(GrRenderTarget* target) = 0;

    // width and height may be larger than the render target (if the underlying API allows it).
    // Should attach the stencil buffer to the render target. Returns false if a compatible
    // stencil buffer could not be created.
    virtual bool createStencilAttachmentForRenderTarget(GrRenderTarget*, int width, int height) = 0;

    // Attaches an existing stencil buffer to an existing render target.
    virtual bool attachStencilAttachmentToRenderTarget(GrStencilAttachment*, GrRenderTarget*) = 0;

    // Clears the target's entire stencil buffer to 0.
    virtual void clearStencil(GrRenderTarget* target) = 0;

    virtual void didAddGpuTraceMarker() = 0;
    virtual void didRemoveGpuTraceMarker() = 0;

    void resetContext() {
        this->onResetContext(fResetBits);
        fResetBits = 0;
        ++fResetTimestamp;
    }

    void handleDirtyContext() {
        if (fResetBits) {
            this->resetContext();
        }
    }

    ResetTimestamp                                                      fResetTimestamp;
    uint32_t                                                            fResetBits;
    // Used to verify that we always have at least as many debug marker adds as removes.
    int                                                                 fGpuTraceMarkerCount;
    GrTraceMarkerSet                                                    fActiveTraceMarkers;
    GrTraceMarkerSet                                                    fStoredTraceMarkers;
    // The context owns us, not vice-versa, so this ptr is not ref'ed by Gpu.
    GrContext*                                                          fContext;

    typedef SkRefCnt INHERITED;
};

#endif