/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrGpu_DEFINED
#define GrGpu_DEFINED

#include "GrGpuCommandBuffer.h"
#include "GrProgramDesc.h"
#include "GrSwizzle.h"
#include "GrAllocator.h"
#include "GrTextureProducer.h"
#include "GrTypes.h"
#include "GrXferProcessor.h"
#include "instanced/InstancedRendering.h"
#include "SkPath.h"
#include "SkTArray.h"
#include <map>

class GrBackendRenderTarget;
class GrBackendSemaphore;
class GrBuffer;
class GrContext;
struct GrContextOptions;
class GrGLContext;
class GrMesh;
class GrPath;
class GrPathRange;
class GrPathRenderer;
class GrPathRendererChain;
class GrPathRendering;
class GrPipeline;
class GrPrimitiveProcessor;
class GrRenderTarget;
class GrSemaphore;
class GrStencilAttachment;
class GrStencilSettings;
class GrSurface;
class GrTexture;

namespace gr_instanced {
    class InstancedOp;
    class InstancedRendering;
    class OpAllocator;
}

class GrGpu : public SkRefCnt {
public:
    /**
     * Create an instance of GrGpu that matches the specified backend. If the requested backend is
     * not supported (at compile-time or run-time) this returns nullptr. The context will not be
     * fully constructed and should not be used by GrGpu until after this function returns.
     */
    static GrGpu* Create(GrBackend, GrBackendContext, const GrContextOptions&, GrContext* context);

    ////////////////////////////////////////////////////////////////////////////

    GrGpu(GrContext* context);
    ~GrGpu() override;

    GrContext* getContext() { return fContext; }
    const GrContext* getContext() const { return fContext; }

    /**
     * Gets the capabilities of the draw target.
     */
    const GrCaps* caps() const { return fCaps.get(); }

    GrPathRendering* pathRendering() { return fPathRendering.get(); }

    enum class DisconnectType {
        // No cleanup should be attempted; immediately cease making backend API calls.
        kAbandon,
        // Free allocated resources (not known by GrResourceCache) before returning and
        // ensure no backend 3D API calls will be made after disconnect() returns.
        kCleanup,
    };

    // Called by GrContext when the underlying backend context has been or will be destroyed
    // before the GrContext.
    virtual void disconnect(DisconnectType);

    /**
     * The GrGpu object normally assumes that no outsider is setting state
     * within the underlying 3D API's context/device/whatever. This call informs
     * the GrGpu that the state was modified and it shouldn't make assumptions
     * about the state.
     */
    void markContextDirty(uint32_t state = kAll_GrBackendState) { fResetBits |= state; }
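
    // Illustrative sketch (not part of the original header): after issuing 3D API calls directly
    // (outside of Skia) a client might conservatively dirty all cached state.
    //
    //     gpu->markContextDirty(kAll_GrBackendState);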

    /**
     * Creates a texture object. If kRenderTarget_GrSurfaceFlag is set in the desc, the texture
     * can be used as a render target by calling GrTexture::asRenderTarget(). Not all
     * pixel configs can be used as render targets. Support for configs as textures
     * or render targets can be checked using GrCaps.
     *
     * @param desc        describes the texture to be created.
     * @param budgeted    does this texture count against the resource cache budget?
     * @param texels      array of mipmap levels containing texel data to load.
     *                    Each level begins with full-size palette data for paletted textures.
     *                    It contains width*height texels. If there is only one
     *                    element and its fPixels is nullptr, the texture data is
     *                    uninitialized.
     * @param mipLevelCount the number of levels in 'texels'
     * @return    The texture object if successful, otherwise nullptr.
     */
    sk_sp<GrTexture> createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
                                   const GrMipLevel texels[], int mipLevelCount);

    /**
     * Simplified createTexture() interface for when there is no initial texel data to upload.
     */
    sk_sp<GrTexture> createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted);
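
    // Illustrative sketch (not part of the original header): creating a single-level texture
    // initialized from CPU pixels. The GrSurfaceDesc/GrMipLevel field names are assumed from this
    // Skia revision.
    //
    //     GrSurfaceDesc desc;
    //     desc.fFlags = kNone_GrSurfaceFlags;
    //     desc.fWidth = 256;
    //     desc.fHeight = 256;
    //     desc.fConfig = kRGBA_8888_GrPixelConfig;
    //
    //     GrMipLevel level;
    //     level.fPixels = pixelData;                    // tightly packed RGBA_8888 pixels
    //     level.fRowBytes = 256 * sizeof(uint32_t);
    //
    //     sk_sp<GrTexture> tex = gpu->createTexture(desc, SkBudgeted::kYes, &level, 1);
    //     if (!tex) { /* creation failed, e.g. unsupported config */ }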

    /**
     * Implements GrResourceProvider::wrapBackendTexture
     */
    sk_sp<GrTexture> wrapBackendTexture(const GrBackendTexture&, GrSurfaceOrigin,
                                        GrBackendTextureFlags, int sampleCnt, GrWrapOwnership);

    /**
     * Implements GrResourceProvider::wrapBackendRenderTarget
     */
    sk_sp<GrRenderTarget> wrapBackendRenderTarget(const GrBackendRenderTarget&, GrSurfaceOrigin);

    /**
     * Implements GrResourceProvider::wrapBackendTextureAsRenderTarget
     */
    sk_sp<GrRenderTarget> wrapBackendTextureAsRenderTarget(const GrBackendTexture&,
                                                           GrSurfaceOrigin,
                                                           int sampleCnt);

    /**
     * Creates a buffer in GPU memory. For a client-side buffer use GrBuffer::CreateCPUBacked.
     *
     * @param size            size of buffer to create.
     * @param intendedType    hint to the graphics subsystem about what the buffer will be used for.
     * @param accessPattern   hint to the graphics subsystem about how the data will be accessed.
     * @param data            optional data with which to initialize the buffer.
     *
     * @return the buffer if successful, otherwise nullptr.
     */
    GrBuffer* createBuffer(size_t size, GrBufferType intendedType, GrAccessPattern accessPattern,
                           const void* data = nullptr);
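
    // Illustrative sketch (not part of the original header): allocating a GPU vertex buffer and
    // uploading initial data. The buffer-type and access-pattern enum names are assumed from this
    // Skia revision.
    //
    //     static const SkPoint kVerts[4] = { {0, 0}, {1, 0}, {1, 1}, {0, 1} };
    //     GrBuffer* vb = gpu->createBuffer(sizeof(kVerts), kVertex_GrBufferType,
    //                                      kStatic_GrAccessPattern, kVerts);
    //     if (!vb) { /* allocation failed */ }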

    /**
     * Creates an instanced rendering object if it is supported on this platform.
     */
    std::unique_ptr<gr_instanced::OpAllocator> createInstancedRenderingAllocator();
    gr_instanced::InstancedRendering* createInstancedRendering();

    /**
     * Resolves MSAA.
     */
    void resolveRenderTarget(GrRenderTarget* target);

    /** Info struct returned by getReadPixelsInfo about performing intermediate draws before
        reading pixels for performance or correctness. */
    struct ReadPixelTempDrawInfo {
        /** If the GrGpu is requesting that the caller do a draw to an intermediate surface then
            this is the descriptor for the temp surface. The draw should always be a rect with
            dst 0,0,w,h. */
        GrSurfaceDesc   fTempSurfaceDesc;
        /** Indicates whether there is a performance advantage to using an exact match texture
            (in terms of width and height) for the intermediate texture instead of approximate. */
        SkBackingFit    fTempSurfaceFit;
        /** Swizzle to apply during the draw. This is used to compensate for either feature or
            performance limitations in the underlying 3D API. */
        GrSwizzle       fSwizzle;
        /** The config that should be used to read from the temp surface after the draw. This may be
            different than the original read config in order to compensate for swizzling. The
            read data will effectively be in the original read config. */
        GrPixelConfig   fReadConfig;
    };

    /** Describes why an intermediate draw must/should be performed before readPixels. */
    enum DrawPreference {
        /** On input means that the caller would proceed without a draw if the GrGpu doesn't request
            one.
            On output means that the GrGpu is not requesting a draw. */
        kNoDraw_DrawPreference,
        /** Means that the client would prefer a draw for performance of the readback but
            can satisfy a straight readPixels call on the inputs without an intermediate draw.
            getReadPixelsInfo will never set the draw preference to this value but may leave
            it set. */
        kCallerPrefersDraw_DrawPreference,
        /** On output means that GrGpu would prefer a draw for performance of the readback but
            can satisfy a straight readPixels call on the inputs without an intermediate draw. The
            caller of getReadPixelsInfo should never specify this on input. */
        kGpuPrefersDraw_DrawPreference,
        /** On input means that the caller requires a draw to do a transformation and there is no
            CPU fallback.
            On output means that GrGpu can only satisfy the readPixels request if the intermediate
            draw is performed.
          */
        kRequireDraw_DrawPreference
    };

    /**
     * Used to negotiate whether and how an intermediate draw should or must be performed before
     * a readPixels call. If this returns false then GrGpu could not deduce an intermediate draw
     * that would allow a successful readPixels call. The passed width, height, and rowBytes
     * must be non-zero and already reflect clipping to the src bounds.
     */
    bool getReadPixelsInfo(GrSurface* srcSurface, int readWidth, int readHeight, size_t rowBytes,
                           GrPixelConfig readConfig, DrawPreference*, ReadPixelTempDrawInfo*);
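
    // Illustrative sketch (not part of the original header) of the read-side negotiation: the
    // caller seeds a DrawPreference, lets the GrGpu elevate it, and then decides whether an
    // intermediate draw is needed.
    //
    //     GrGpu::DrawPreference pref = GrGpu::kNoDraw_DrawPreference;
    //     GrGpu::ReadPixelTempDrawInfo tempInfo;
    //     if (!gpu->getReadPixelsInfo(src, width, height, rowBytes, readConfig,
    //                                 &pref, &tempInfo)) {
    //         // No combination of draws/readbacks can service this request.
    //     } else if (GrGpu::kNoDraw_DrawPreference != pref) {
    //         // Draw src into a temp surface described by tempInfo.fTempSurfaceDesc (applying
    //         // tempInfo.fSwizzle), then read back using tempInfo.fReadConfig.
    //     } else {
    //         // A straight gpu->readPixels() on src suffices.
    //     }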

    /** Info struct returned by getWritePixelsInfo about performing an intermediate draw in order
        to write pixels to a GrSurface for either performance or correctness reasons. */
    struct WritePixelTempDrawInfo {
        /** If the GrGpu is requesting that the caller upload to an intermediate surface and draw
            that to the dst then this is the descriptor for the intermediate surface. The caller
            should upload the pixels such that the upper left pixel of the upload rect is at 0,0 in
            the intermediate surface. */
        GrSurfaceDesc   fTempSurfaceDesc;
        /** Swizzle to apply during the draw. This is used to compensate for either feature or
            performance limitations in the underlying 3D API. */
        GrSwizzle       fSwizzle;
        /** The config that should be specified when uploading the *original* data to the temp
            surface before the draw. This may be different than the original src data config in
            order to compensate for swizzling that will occur when drawing. */
        GrPixelConfig   fWriteConfig;
    };

    /**
     * Used to negotiate whether and how an intermediate surface should be used to write pixels to
     * a GrSurface. If this returns false then GrGpu could not deduce an intermediate draw
     * that would allow a successful transfer of the src pixels to the dst. The passed width,
     * height, and rowBytes must be non-zero and already reflect clipping to the dst bounds.
     */
    bool getWritePixelsInfo(GrSurface* dstSurface, int width, int height,
                            GrPixelConfig srcConfig, DrawPreference*, WritePixelTempDrawInfo*);
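
    // The write-side negotiation mirrors the read-side sketch above (illustrative, not part of
    // the original header):
    //
    //     GrGpu::DrawPreference pref = GrGpu::kNoDraw_DrawPreference;
    //     GrGpu::WritePixelTempDrawInfo tempInfo;
    //     if (gpu->getWritePixelsInfo(dst, width, height, srcConfig, &pref, &tempInfo) &&
    //         GrGpu::kNoDraw_DrawPreference != pref) {
    //         // Upload the data (as tempInfo.fWriteConfig) to a temp surface described by
    //         // tempInfo.fTempSurfaceDesc, then draw that surface into dst with tempInfo.fSwizzle.
    //     }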

    /**
     * Reads a rectangle of pixels from a render target.
     *
     * @param surface       The surface to read from
     * @param left          left edge of the rectangle to read (inclusive)
     * @param top           top edge of the rectangle to read (inclusive)
     * @param width         width of rectangle to read in pixels.
     * @param height        height of rectangle to read in pixels.
     * @param config        the pixel config of the destination buffer
     * @param buffer        memory to read the rectangle into.
     * @param rowBytes      the number of bytes between consecutive rows. Zero
     *                      means rows are tightly packed.
     *
     * @return true if the read succeeded, false if not. The read can fail
     *              because of an unsupported pixel config or because no render
     *              target is currently set.
     */
    bool readPixels(GrSurface* surface,
                    int left, int top, int width, int height,
                    GrPixelConfig config, void* buffer, size_t rowBytes);
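
    // Illustrative sketch (not part of the original header): reading back a tightly packed
    // RGBA_8888 rectangle into CPU memory.
    //
    //     const int w = 32, h = 32;
    //     std::unique_ptr<uint32_t[]> pixels(new uint32_t[w * h]);
    //     bool ok = gpu->readPixels(surface, 0, 0, w, h, kRGBA_8888_GrPixelConfig,
    //                               pixels.get(), 0 /* tightly packed rows */);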

    /**
     * Updates the pixels in a rectangle of a surface.
     *
     * @param surface       The surface to write to.
     * @param left          left edge of the rectangle to write (inclusive)
     * @param top           top edge of the rectangle to write (inclusive)
     * @param width         width of rectangle to write in pixels.
     * @param height        height of rectangle to write in pixels.
     * @param config        the pixel config of the source buffer
     * @param texels        array of mipmap levels containing texture data
     * @param mipLevelCount number of levels in 'texels'
     */
    bool writePixels(GrSurface* surface,
                     int left, int top, int width, int height,
                     GrPixelConfig config,
                     const GrMipLevel texels[], int mipLevelCount);

    /**
     * This function is a shim which creates a SkTArray<GrMipLevel> of size 1.
     * It then calls writePixels with that SkTArray.
     *
     * @param buffer   memory to read pixels from.
     * @param rowBytes number of bytes between consecutive rows. Zero
     *                 means rows are tightly packed.
     */
    bool writePixels(GrSurface* surface,
                     int left, int top, int width, int height,
                     GrPixelConfig config, const void* buffer,
                     size_t rowBytes);

    /**
     * Updates the pixels in a rectangle of a texture using a buffer.
     *
     * There are a couple of assumptions here. First, we only update the top miplevel.
     * Second, any y-flip that is needed has already been done in the buffer.
     *
     * @param texture          The texture to write to.
     * @param left             left edge of the rectangle to write (inclusive)
     * @param top              top edge of the rectangle to write (inclusive)
     * @param width            width of rectangle to write in pixels.
     * @param height           height of rectangle to write in pixels.
     * @param config           the pixel config of the source buffer
     * @param transferBuffer   GrBuffer to read pixels from (type must be "kXferCpuToGpu")
     * @param offset           offset from the start of the buffer
     * @param rowBytes         number of bytes between consecutive rows in the buffer. Zero
     *                         means rows are tightly packed.
     */
    bool transferPixels(GrTexture* texture,
                        int left, int top, int width, int height,
                        GrPixelConfig config, GrBuffer* transferBuffer,
                        size_t offset, size_t rowBytes);
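
    // Illustrative sketch (not part of the original header): staging pixels through a transfer
    // buffer. The buffer-type and access-pattern enum names are assumed from this Skia revision.
    //
    //     GrBuffer* xferBuffer = gpu->createBuffer(w * h * sizeof(uint32_t),
    //                                              kXferCpuToGpu_GrBufferType,
    //                                              kDynamic_GrAccessPattern,
    //                                              srcPixels);
    //     if (xferBuffer) {
    //         gpu->transferPixels(texture, 0, 0, w, h, kRGBA_8888_GrPixelConfig,
    //                             xferBuffer, 0 /* offset */, 0 /* tightly packed */);
    //     }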

    // After the client interacts directly with the 3D context state the GrGpu
    // must resync its internal state and assumptions about the state of the 3D context.
    // Each time this occurs the GrGpu bumps a timestamp.
    // At 10 resets / frame and 60fps a 64bit timestamp will overflow in about
    // a billion years.
    typedef uint64_t ResetTimestamp;

    // This timestamp is always older than the current timestamp
    static const ResetTimestamp kExpiredTimestamp = 0;
    // Returns a timestamp based on the number of times the context was reset.
    // This timestamp can be used to lazily detect when cached 3D context state
    // is dirty.
    ResetTimestamp getResetTimestamp() const { return fResetTimestamp; }
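
    // Illustrative sketch (not part of the original header) of the lazy dirty-detection pattern
    // the timestamp enables; 'cachedState' and 'revalidate' are hypothetical names.
    //
    //     if (cachedState.fLastResetTimestamp != gpu->getResetTimestamp()) {
    //         // The 3D context has been reset since this state was cached; re-validate it.
    //         cachedState.revalidate();
    //         cachedState.fLastResetTimestamp = gpu->getResetTimestamp();
    //     }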

    // Called to perform a surface to surface copy. Fallbacks to issuing a draw from the src to dst
    // take place at the GrOpList level and this function implements faster copy paths. The rect
    // and point are pre-clipped. The src rect and implied dst rect are guaranteed to be within the
    // src/dst bounds and non-empty.
    bool copySurface(GrSurface* dst,
                     GrSurface* src,
                     const SkIRect& srcRect,
                     const SkIPoint& dstPoint);
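
    // Illustrative sketch (not part of the original header): copying a pre-clipped 16x16 block
    // from src into dst with its top-left placed at (8, 8).
    //
    //     if (!gpu->copySurface(dst, src, SkIRect::MakeWH(16, 16), SkIPoint::Make(8, 8))) {
    //         // No fast path; the caller falls back to a draw at the GrOpList level.
    //     }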

    struct MultisampleSpecs {
        MultisampleSpecs(uint8_t uniqueID, int effectiveSampleCnt, const SkPoint* locations)
            : fUniqueID(uniqueID),
              fEffectiveSampleCnt(effectiveSampleCnt),
              fSampleLocations(locations) {}

        // Nonzero ID that uniquely identifies these multisample specs.
        uint8_t          fUniqueID;
        // The actual number of samples the GPU will run. NOTE: this value can be greater than the
        // render target's sample count.
        int              fEffectiveSampleCnt;
        // If sample locations are supported, points to the subpixel locations at which the GPU will
        // sample. Pixel center is at (.5, .5), and (0, 0) indicates the top left corner.
        const SkPoint*   fSampleLocations;
    };

    // Finds a render target's multisample specs. The pipeline is only needed in case we need to
    // flush the draw state prior to querying multisample info. The pipeline is not expected to
    // affect the multisample information itself.
    const MultisampleSpecs& queryMultisampleSpecs(const GrPipeline&);

    // Finds the multisample specs with a given unique id.
    const MultisampleSpecs& getMultisampleSpecs(uint8_t uniqueID) {
        SkASSERT(uniqueID > 0 && uniqueID < fMultisampleSpecs.count());
        return fMultisampleSpecs[uniqueID];
    }

    // Creates a GrGpuCommandBuffer to which the GrOpList can send draw commands, instead of
    // sending them directly to the GrGpu object. This currently does not take a GrRenderTarget.
    // The command buffer is expected to infer the render target from the first draw, clear, or
    // discard. This is an awkward workaround that goes away after MDB is complete and the render
    // target is known from the GrRenderTargetOpList.
    virtual GrGpuCommandBuffer* createCommandBuffer(
            const GrGpuCommandBuffer::LoadAndStoreInfo& colorInfo,
            const GrGpuCommandBuffer::LoadAndStoreInfo& stencilInfo) = 0;

    // Called by GrDrawingManager when flushing.
    // Provides a hook for post-flush actions (e.g. Vulkan command buffer submits).
    virtual void finishFlush() {}

    virtual GrFence SK_WARN_UNUSED_RESULT insertFence() = 0;
    virtual bool waitFence(GrFence, uint64_t timeout = 1000) = 0;
    virtual void deleteFence(GrFence) const = 0;
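
    // Illustrative sketch (not part of the original header): blocking the CPU until previously
    // issued GPU work has completed, within a finite timeout.
    //
    //     GrFence fence = gpu->insertFence();
    //     if (!gpu->waitFence(fence, 1000 /* timeout */)) {
    //         // Timed out; the GPU work is still in flight.
    //     }
    //     gpu->deleteFence(fence);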

    virtual sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned = true) = 0;
    virtual sk_sp<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
                                                    GrWrapOwnership ownership) = 0;
    virtual void insertSemaphore(sk_sp<GrSemaphore> semaphore, bool flush = false) = 0;
    virtual void waitSemaphore(sk_sp<GrSemaphore> semaphore) = 0;

    /**
     *  Put this texture in a safe and known state for use across multiple GrContexts. Depending on
     *  the backend, this may return a GrSemaphore. If so, other contexts should wait on that
     *  semaphore before using this texture.
     */
    virtual sk_sp<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) = 0;
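
    // Illustrative sketch (not part of the original header): handing a texture to another
    // context. 'otherGpu' names the GrGpu of the receiving GrContext and is hypothetical.
    //
    //     sk_sp<GrSemaphore> sema = gpu->prepareTextureForCrossContextUsage(texture);
    //     if (sema) {
    //         otherGpu->waitSemaphore(sema);   // wait before the other context uses 'texture'
    //     }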

    ///////////////////////////////////////////////////////////////////////////
    // Debugging and Stats

    class Stats {
    public:
#if GR_GPU_STATS
        Stats() { this->reset(); }

        void reset() {
            fRenderTargetBinds = 0;
            fShaderCompilations = 0;
            fTextureCreates = 0;
            fTextureUploads = 0;
            fTransfersToTexture = 0;
            fStencilAttachmentCreates = 0;
            fNumDraws = 0;
            fNumFailedDraws = 0;
        }

        int renderTargetBinds() const { return fRenderTargetBinds; }
        void incRenderTargetBinds() { fRenderTargetBinds++; }
        int shaderCompilations() const { return fShaderCompilations; }
        void incShaderCompilations() { fShaderCompilations++; }
        int textureCreates() const { return fTextureCreates; }
        void incTextureCreates() { fTextureCreates++; }
        int textureUploads() const { return fTextureUploads; }
        void incTextureUploads() { fTextureUploads++; }
        int transfersToTexture() const { return fTransfersToTexture; }
        void incTransfersToTexture() { fTransfersToTexture++; }
        void incStencilAttachmentCreates() { fStencilAttachmentCreates++; }
        void incNumDraws() { fNumDraws++; }
        void incNumFailedDraws() { ++fNumFailedDraws; }
        void dump(SkString*);
        void dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values);
        int numDraws() const { return fNumDraws; }
        int numFailedDraws() const { return fNumFailedDraws; }
    private:
        int fRenderTargetBinds;
        int fShaderCompilations;
        int fTextureCreates;
        int fTextureUploads;
        int fTransfersToTexture;
        int fStencilAttachmentCreates;
        int fNumDraws;
        int fNumFailedDraws;
#else
        void dump(SkString*) {}
        void dumpKeyValuePairs(SkTArray<SkString>*, SkTArray<double>*) {}
        void incRenderTargetBinds() {}
        void incShaderCompilations() {}
        void incTextureCreates() {}
        void incTextureUploads() {}
        void incTransfersToTexture() {}
        void incStencilAttachmentCreates() {}
        void incNumDraws() {}
        void incNumFailedDraws() {}
#endif
    };

    Stats* stats() { return &fStats; }
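
    // Illustrative sketch (not part of the original header): dumping the per-GrGpu counters when
    // stats are compiled in (GR_GPU_STATS).
    //
    //     SkString out;
    //     gpu->stats()->dump(&out);
    //     SkDebugf("%s\n", out.c_str());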

    /** Creates a texture directly in the backend API without wrapping it in a GrTexture. This is
        only to be used for testing (particularly for testing the methods that import an externally
        created texture into Skia). Must be matched with a call to deleteTestingOnlyBackendTexture(). */
    virtual GrBackendObject createTestingOnlyBackendTexture(void* pixels, int w, int h,
                                                            GrPixelConfig config,
                                                            bool isRenderTarget = false) = 0;
    /** Checks that a handle represents an actual texture in the backend API that has not been freed. */
    virtual bool isTestingOnlyBackendTexture(GrBackendObject) const = 0;
    /** If ownership of the backend texture has been transferred pass true for abandonTexture. This
        will do any necessary cleanup of the handle without freeing the texture in the backend
        API. */
    virtual void deleteTestingOnlyBackendTexture(GrBackendObject,
                                                 bool abandonTexture = false) = 0;

    // width and height may be larger than the render target (if the underlying API allows it).
    // Returns nullptr if a compatible stencil attachment could not be created, otherwise the
    // caller owns the ref on the GrStencilAttachment.
    virtual GrStencilAttachment* createStencilAttachmentForRenderTarget(const GrRenderTarget*,
                                                                        int width,
                                                                        int height) = 0;
    // clears target's entire stencil buffer to 0
    virtual void clearStencil(GrRenderTarget* target) = 0;

    // Determines whether a texture will need to be rescaled in order to be used with the
    // GrSamplerParams. This variation is called when the caller will create a new texture using the
    // resource provider from a non-texture src (cpu-backed image, ...).
    bool isACopyNeededForTextureParams(int width, int height, const GrSamplerParams&,
                                       GrTextureProducer::CopyParams*,
                                       SkScalar scaleAdjust[2]) const;

    // Like the above but this variation should be called when the caller is not creating the
    // original texture but rather was handed the original texture. It adds additional checks
    // relevant to original textures that were created external to Skia via
    // GrResourceProvider::wrap methods.
    bool isACopyNeededForTextureParams(GrTextureProxy* proxy, const GrSamplerParams& params,
                                       GrTextureProducer::CopyParams* copyParams,
                                       SkScalar scaleAdjust[2]) const {
        if (this->isACopyNeededForTextureParams(proxy->width(), proxy->height(), params,
                                                copyParams, scaleAdjust)) {
            return true;
        }
        return this->onIsACopyNeededForTextureParams(proxy, params, copyParams, scaleAdjust);
    }

    // This is only to be used in GL-specific tests.
    virtual const GrGLContext* glContextForTesting() const { return nullptr; }

    // This is only to be used by testing code
    virtual void resetShaderCacheForTesting() const {}

    void handleDirtyContext() {
        if (fResetBits) {
            this->resetContext();
        }
    }

protected:
    static void ElevateDrawPreference(GrGpu::DrawPreference* preference,
                                      GrGpu::DrawPreference elevation) {
        GR_STATIC_ASSERT(GrGpu::kCallerPrefersDraw_DrawPreference > GrGpu::kNoDraw_DrawPreference);
        GR_STATIC_ASSERT(GrGpu::kGpuPrefersDraw_DrawPreference >
                         GrGpu::kCallerPrefersDraw_DrawPreference);
        GR_STATIC_ASSERT(GrGpu::kRequireDraw_DrawPreference >
                         GrGpu::kGpuPrefersDraw_DrawPreference);
        *preference = SkTMax(*preference, elevation);
    }

    // Handles cases where a surface will be updated without a call to flushRenderTarget
    void didWriteToSurface(GrSurface* surface, const SkIRect* bounds, uint32_t mipLevels = 1) const;

    Stats                            fStats;
    std::unique_ptr<GrPathRendering> fPathRendering;
    // Subclass must initialize this in its constructor.
    sk_sp<const GrCaps>              fCaps;

    typedef SkTArray<SkPoint, true> SamplePattern;

private:
    // Called when the 3D context state is unknown. Subclass should emit any
    // assumed 3D context state and dirty any state cache.
    virtual void onResetContext(uint32_t resetBits) = 0;

    // Called before certain draws in order to guarantee coherent results from dst reads.
    virtual void xferBarrier(GrRenderTarget*, GrXferBarrierType) = 0;

    // overridden by backend-specific derived class to create objects.
    // Texture size and sample size will have already been validated in base class before
    // onCreateTexture is called.
    virtual sk_sp<GrTexture> onCreateTexture(const GrSurfaceDesc& desc,
                                             SkBudgeted budgeted,
                                             const GrMipLevel texels[],
                                             int mipLevelCount) = 0;

    virtual sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&,
                                                  GrSurfaceOrigin,
                                                  GrBackendTextureFlags,
                                                  int sampleCnt,
                                                  GrWrapOwnership) = 0;
    virtual sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&,
                                                            GrSurfaceOrigin) = 0;
    virtual sk_sp<GrRenderTarget> onWrapBackendTextureAsRenderTarget(const GrBackendTexture&,
                                                                     GrSurfaceOrigin,
                                                                     int sampleCnt) = 0;
    virtual GrBuffer* onCreateBuffer(size_t size, GrBufferType intendedType, GrAccessPattern,
                                     const void* data) = 0;

    virtual gr_instanced::InstancedRendering* onCreateInstancedRendering() = 0;
    virtual std::unique_ptr<gr_instanced::OpAllocator> onCreateInstancedRenderingAllocator() {
        return nullptr;
    }

    virtual bool onIsACopyNeededForTextureParams(GrTextureProxy* proxy, const GrSamplerParams&,
                                                 GrTextureProducer::CopyParams*,
                                                 SkScalar scaleAdjust[2]) const {
        return false;
    }

    virtual bool onGetReadPixelsInfo(GrSurface* srcSurface, int readWidth, int readHeight,
                                     size_t rowBytes, GrPixelConfig readConfig, DrawPreference*,
                                     ReadPixelTempDrawInfo*) = 0;
    virtual bool onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
                                      GrPixelConfig srcConfig, DrawPreference*,
                                      WritePixelTempDrawInfo*) = 0;

    // overridden by backend-specific derived class to perform the surface read
    virtual bool onReadPixels(GrSurface*,
                              int left, int top,
                              int width, int height,
                              GrPixelConfig,
                              void* buffer,
                              size_t rowBytes) = 0;

    // overridden by backend-specific derived class to perform the surface write
    virtual bool onWritePixels(GrSurface*,
                               int left, int top, int width, int height,
                               GrPixelConfig config,
                               const GrMipLevel texels[], int mipLevelCount) = 0;

    // overridden by backend-specific derived class to perform the texture transfer
    virtual bool onTransferPixels(GrTexture*,
                                  int left, int top, int width, int height,
                                  GrPixelConfig config, GrBuffer* transferBuffer,
                                  size_t offset, size_t rowBytes) = 0;

    // overridden by backend-specific derived class to perform the resolve
    virtual void onResolveRenderTarget(GrRenderTarget* target) = 0;

    // overridden by backend-specific derived class to perform the copy surface
    virtual bool onCopySurface(GrSurface* dst,
                               GrSurface* src,
                               const SkIRect& srcRect,
                               const SkIPoint& dstPoint) = 0;

    // overridden by backend-specific derived class to perform the multisample queries
    virtual void onQueryMultisampleSpecs(GrRenderTarget*, const GrStencilSettings&,
                                         int* effectiveSampleCnt, SamplePattern*) = 0;

    void resetContext() {
        this->onResetContext(fResetBits);
        fResetBits = 0;
        ++fResetTimestamp;
    }

    struct SamplePatternComparator {
        bool operator()(const SamplePattern&, const SamplePattern&) const;
    };

    typedef std::map<SamplePattern, uint8_t, SamplePatternComparator> MultisampleSpecsIdMap;

    ResetTimestamp                         fResetTimestamp;
    uint32_t                               fResetBits;
    MultisampleSpecsIdMap                  fMultisampleSpecsIdMap;
    SkSTArray<1, MultisampleSpecs, true>   fMultisampleSpecs;
    // The context owns us, not vice-versa, so this ptr is not ref'ed by Gpu.
    GrContext*                             fContext;

    friend class GrPathRendering;
    friend class gr_instanced::InstancedOp; // for xferBarrier
    typedef SkRefCnt INHERITED;
};

#endif