/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrVkGpu_DEFINED
#define GrVkGpu_DEFINED

#include "GrGpu.h"
#include "GrVkCaps.h"
#include "GrVkCopyManager.h"
#include "GrVkIndexBuffer.h"
#include "GrVkMemory.h"
#include "GrVkResourceProvider.h"
#include "GrVkSemaphore.h"
#include "GrVkVertexBuffer.h"
#include "GrVkUtil.h"
#include "vk/GrVkBackendContext.h"
#include "vk/GrVkTypes.h"

class GrPipeline;

class GrVkBufferImpl;
class GrVkCommandPool;
class GrVkGpuRTCommandBuffer;
class GrVkGpuTextureCommandBuffer;
class GrVkMemoryAllocator;
class GrVkPipeline;
class GrVkPipelineState;
class GrVkPrimaryCommandBuffer;
class GrVkRenderPass;
class GrVkSecondaryCommandBuffer;
class GrVkTexture;
struct GrVkInterface;

namespace SkSL {
    class Compiler;
}

class GrVkGpu : public GrGpu {
public:
    static sk_sp<GrGpu> Make(const GrVkBackendContext&, const GrContextOptions&, GrContext*);

    ~GrVkGpu() override;

    void disconnect(DisconnectType) override;

    const GrVkInterface* vkInterface() const { return fInterface.get(); }
    const GrVkCaps& vkCaps() const { return *fVkCaps; }

    GrVkMemoryAllocator* memoryAllocator() const { return fMemoryAllocator.get(); }

    VkPhysicalDevice physicalDevice() const { return fPhysicalDevice; }
    VkDevice device() const { return fDevice; }
    VkQueue  queue() const { return fQueue; }
    uint32_t queueIndex() const { return fQueueIndex; }
    GrVkCommandPool* cmdPool() const { return fCmdPool; }
    const VkPhysicalDeviceProperties& physicalDeviceProperties() const {
        return fPhysDevProps;
    }
    const VkPhysicalDeviceMemoryProperties& physicalDeviceMemoryProperties() const {
        return fPhysDevMemProps;
    }

    GrVkResourceProvider& resourceProvider() { return fResourceProvider; }

    GrVkPrimaryCommandBuffer* currentCommandBuffer() { return fCurrentCmdBuffer; }

    enum SyncQueue {
        kForce_SyncQueue,
        kSkip_SyncQueue
    };

    void querySampleLocations(
            GrRenderTarget*, const GrStencilSettings&, SkTArray<SkPoint>*) override {
        SkASSERT(!this->caps()->sampleLocationsSupport());
        SK_ABORT("Sample locations not yet implemented for Vulkan.");
    }

    void xferBarrier(GrRenderTarget*, GrXferBarrierType) override {}

#if GR_TEST_UTILS
    GrBackendTexture createTestingOnlyBackendTexture(const void* pixels, int w, int h,
                                                     GrColorType colorType, bool isRenderTarget,
                                                     GrMipMapped, size_t rowBytes = 0) override;
    bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;
    void deleteTestingOnlyBackendTexture(const GrBackendTexture&) override;

    GrBackendRenderTarget createTestingOnlyBackendRenderTarget(int w, int h, GrColorType) override;
    void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) override;

    void testingOnly_flushGpuAndSync() override;
#endif

    GrStencilAttachment* createStencilAttachmentForRenderTarget(const GrRenderTarget*,
                                                                int width,
                                                                int height) override;

    GrGpuRTCommandBuffer* getCommandBuffer(
            GrRenderTarget*, GrSurfaceOrigin, const SkRect&,
            const GrGpuRTCommandBuffer::LoadAndStoreInfo&,
            const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo&) override;

    GrGpuTextureCommandBuffer* getCommandBuffer(GrTexture*, GrSurfaceOrigin) override;

    void addMemoryBarrier(VkPipelineStageFlags srcStageMask,
                          VkPipelineStageFlags dstStageMask,
                          bool byRegion,
                          VkMemoryBarrier* barrier) const;
    void addBufferMemoryBarrier(const GrVkResource*,
                                VkPipelineStageFlags srcStageMask,
                                VkPipelineStageFlags dstStageMask,
                                bool byRegion,
                                VkBufferMemoryBarrier* barrier) const;
    void addImageMemoryBarrier(const GrVkResource*,
                               VkPipelineStageFlags srcStageMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion,
                               VkImageMemoryBarrier* barrier) const;
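    // A minimal usage sketch for the barrier helpers above (hypothetical; "gpu" stands for a
    // valid GrVkGpu* and "imageResource" for the GrVkResource* backing the image being
    // transitioned; neither name is defined in this header):
    //
    //     VkImageMemoryBarrier barrier;
    //     memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
    //     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    //     barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    //     barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
    //     barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    //     barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    //     // ... fill in image, subresourceRange, and queue family indices ...
    //     gpu->addImageMemoryBarrier(imageResource,
    //                                VK_PIPELINE_STAGE_TRANSFER_BIT,
    //                                VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
    //                                /*byRegion=*/false, &barrier);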

    SkSL::Compiler* shaderCompiler() const {
        return fCompiler;
    }

    bool onRegenerateMipMapLevels(GrTexture* tex) override;

    void resolveRenderTargetNoFlush(GrRenderTarget* target) {
        this->internalResolveRenderTarget(target, false);
    }

    void onResolveRenderTarget(GrRenderTarget* target) override {
        // This resolve is called when we are preparing an MSAA surface for external I/O. It is
        // called after flushing, so we need to submit the command buffer after performing the
        // resolve to ensure the resolve actually executes.
        this->internalResolveRenderTarget(target, true);
    }

    void submitSecondaryCommandBuffer(const SkTArray<GrVkSecondaryCommandBuffer*>&,
                                      const GrVkRenderPass*,
                                      const VkClearValue* colorClear,
                                      GrVkRenderTarget*, GrSurfaceOrigin,
                                      const SkIRect& bounds);

    void submit(GrGpuCommandBuffer*) override;

    GrFence SK_WARN_UNUSED_RESULT insertFence() override;
    bool waitFence(GrFence, uint64_t timeout) override;
    void deleteFence(GrFence) const override;

    sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned) override;
    sk_sp<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
                                            GrResourceProvider::SemaphoreWrapType wrapType,
                                            GrWrapOwnership ownership) override;
    void insertSemaphore(sk_sp<GrSemaphore> semaphore) override;
    void waitSemaphore(sk_sp<GrSemaphore> semaphore) override;
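    // A minimal fence round trip (a hypothetical usage sketch; "gpu" is assumed to be a valid
    // GrVkGpu* and the one-second timeout is arbitrary):
    //
    //     GrFence fence = gpu->insertFence();
    //     if (gpu->waitFence(fence, /*timeoutNs=*/1000000000)) {
    //         // GPU work submitted before the fence was inserted has completed.
    //     }
    //     gpu->deleteFence(fence);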

    // These match the definitions in SkDrawable, which is where they originate.
    typedef void* SubmitContext;
    typedef void (*SubmitProc)(SubmitContext submitContext);

    // Adds an SkDrawable::GpuDrawHandler that we will delete the next time we submit the primary
    // command buffer to the GPU.
    void addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable);
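    // For example (hypothetical; "gpu" and "handler" stand in for a valid GrVkGpu* and an
    // already-constructed std::unique_ptr<SkDrawable::GpuDrawHandler>), a caller hands over
    // ownership and this class frees the handler after the next primary command buffer submit:
    //
    //     gpu->addDrawable(std::move(handler));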

    sk_sp<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;

    void copyBuffer(GrVkBuffer* srcBuffer, GrVkBuffer* dstBuffer, VkDeviceSize srcOffset,
                    VkDeviceSize dstOffset, VkDeviceSize size);
    bool updateBuffer(GrVkBuffer* buffer, const void* src, VkDeviceSize offset, VkDeviceSize size);

    uint32_t getExtraSamplerKeyForProgram(const GrSamplerState&,
                                          const GrBackendFormat& format) override;

    enum PersistentCacheKeyType : uint32_t {
        kShader_PersistentCacheKeyType = 0,
        kPipelineCache_PersistentCacheKeyType = 1,
    };

    void storeVkPipelineCacheData() override;

private:
    GrVkGpu(GrContext*, const GrContextOptions&, const GrVkBackendContext&,
            sk_sp<const GrVkInterface>, uint32_t instanceVersion, uint32_t physicalDeviceVersion);

    void onResetContext(uint32_t resetBits) override {}

    void destroyResources();

    sk_sp<GrTexture> onCreateTexture(const GrSurfaceDesc&, SkBudgeted, const GrMipLevel[],
                                     int mipLevelCount) override;

    sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&, GrWrapOwnership, GrWrapCacheable,
                                          GrIOType) override;
    sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
                                                    int sampleCnt,
                                                    GrWrapOwnership,
                                                    GrWrapCacheable) override;
    sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) override;

    sk_sp<GrRenderTarget> onWrapBackendTextureAsRenderTarget(const GrBackendTexture&,
                                                             int sampleCnt) override;

    sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
                                                                const GrVkDrawableInfo&) override;

    sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType type, GrAccessPattern,
                                      const void* data) override;

    bool onReadPixels(GrSurface* surface, int left, int top, int width, int height, GrColorType,
                      void* buffer, size_t rowBytes) override;

    bool onWritePixels(GrSurface* surface, int left, int top, int width, int height, GrColorType,
                       const GrMipLevel texels[], int mipLevelCount) override;

    bool onTransferPixels(GrTexture*, int left, int top, int width, int height, GrColorType,
                          GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes) override;

    bool onCopySurface(GrSurface* dst, GrSurfaceOrigin dstOrigin, GrSurface* src,
                       GrSurfaceOrigin srcOrigin, const SkIRect& srcRect,
                       const SkIPoint& dstPoint, bool canDiscardOutsideDstRect) override;

    void onFinishFlush(GrSurfaceProxy*, SkSurface::BackendSurfaceAccess access, GrFlushFlags flags,
                       bool insertedSemaphores, GrGpuFinishedProc finishedProc,
                       GrGpuFinishedContext finishedContext) override;

    // Ends and submits the current command buffer to the queue, then creates a new command
    // buffer and begins it. If sync is set to kForce_SyncQueue, the function waits for all
    // work in the queue to finish before returning. If this GrVkGpu object has any semaphores in
    // fSemaphoresToSignal, those signal semaphores are added to the submission of this command
    // buffer; likewise, any semaphores in fSemaphoresToWaitOn are added as wait semaphores.
    void submitCommandBuffer(SyncQueue sync, GrGpuFinishedProc finishedProc = nullptr,
                             GrGpuFinishedContext finishedContext = nullptr);
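    // For example (a hypothetical internal call, not a prescribed pattern): a read-back path
    // that must observe completed GPU work can end the current recording and block on the queue
    // with
    //
    //     this->submitCommandBuffer(kForce_SyncQueue);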

    void internalResolveRenderTarget(GrRenderTarget*, bool requiresSubmit);

    void copySurfaceAsCopyImage(GrSurface* dst, GrSurfaceOrigin dstOrigin,
                                GrSurface* src, GrSurfaceOrigin srcOrigin,
                                GrVkImage* dstImage, GrVkImage* srcImage,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint);

    void copySurfaceAsBlit(GrSurface* dst, GrSurfaceOrigin dstOrigin,
                           GrSurface* src, GrSurfaceOrigin srcOrigin,
                           GrVkImage* dstImage, GrVkImage* srcImage,
                           const SkIRect& srcRect,
                           const SkIPoint& dstPoint);

    void copySurfaceAsResolve(GrSurface* dst, GrSurfaceOrigin dstOrigin,
                              GrSurface* src, GrSurfaceOrigin srcOrigin,
                              const SkIRect& srcRect,
                              const SkIPoint& dstPoint);

    // Helpers for onCreateTexture and writeTexturePixels.
    bool uploadTexDataLinear(GrVkTexture* tex, int left, int top, int width, int height,
                             GrColorType colorType, const void* data, size_t rowBytes);
    bool uploadTexDataOptimal(GrVkTexture* tex, int left, int top, int width, int height,
                              GrColorType colorType, const GrMipLevel texels[], int mipLevelCount);
    bool uploadTexDataCompressed(GrVkTexture* tex, int left, int top, int width, int height,
                                 GrColorType dataColorType, const GrMipLevel texels[],
                                 int mipLevelCount);
    void resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
                      const SkIPoint& dstPoint);

#if GR_TEST_UTILS
    bool createTestingOnlyVkImage(GrPixelConfig config, int w, int h, bool texturable,
                                  bool renderable, GrMipMapped mipMapped, const void* srcData,
                                  size_t srcRowBytes, GrVkImageInfo* info);
#endif

    sk_sp<const GrVkInterface>                            fInterface;
    sk_sp<GrVkMemoryAllocator>                            fMemoryAllocator;
    sk_sp<GrVkCaps>                                       fVkCaps;

    VkInstance                                            fInstance;
    VkPhysicalDevice                                      fPhysicalDevice;
    VkDevice                                              fDevice;
    VkQueue                                               fQueue;    // Must be the graphics queue.
    uint32_t                                              fQueueIndex;

    // Created by GrVkGpu.
    GrVkResourceProvider                                  fResourceProvider;

    GrVkCommandPool*                                      fCmdPool;

    // Just a raw pointer; the object's lifetime is managed by fCmdPool.
    GrVkPrimaryCommandBuffer*                             fCurrentCmdBuffer;

    SkSTArray<1, GrVkSemaphore::Resource*>                fSemaphoresToWaitOn;
    SkSTArray<1, GrVkSemaphore::Resource*>                fSemaphoresToSignal;

    SkTArray<std::unique_ptr<SkDrawable::GpuDrawHandler>> fDrawables;

    VkPhysicalDeviceProperties                            fPhysDevProps;
    VkPhysicalDeviceMemoryProperties                      fPhysDevMemProps;

    GrVkCopyManager                                       fCopyManager;

    // Compiler used for compiling SkSL into SPIR-V. We only want to create the compiler once,
    // since the first compile performed by any compiler instance carries significant overhead.
    SkSL::Compiler*                                       fCompiler;

    // Tracks whether we have already disconnected all of the GPU resources from the Vulkan
    // context.
    bool                                                  fDisconnected;

    std::unique_ptr<GrVkGpuRTCommandBuffer>               fCachedRTCommandBuffer;
    std::unique_ptr<GrVkGpuTextureCommandBuffer>          fCachedTexCommandBuffer;

    typedef GrGpu INHERITED;
};

#endif