/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkBuffer.h"
#include "GrVkGpu.h"
#include "GrVkMemory.h"
#include "GrVkTransferBuffer.h"
#include "GrVkUtil.h"

#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)

#ifdef SK_DEBUG
#define VALIDATE() this->validate()
#else
#define VALIDATE() do {} while(false)
#endif

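// Creates the VkBuffer and allocates/binds its backing memory, wrapping both in a
// ref-counted Resource. Non-dynamic buffers also get TRANSFER_DST usage so they can be
// updated through staging copies. Returns nullptr if buffer creation or the memory
// allocation fails.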
const GrVkBuffer::Resource* GrVkBuffer::Create(const GrVkGpu* gpu, const Desc& desc) {
    VkBuffer       buffer;
    GrVkAlloc      alloc;

    // create the buffer object
    VkBufferCreateInfo bufInfo;
    memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
    bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bufInfo.flags = 0;
    bufInfo.size = desc.fSizeInBytes;
    switch (desc.fType) {
        case kVertex_Type:
            bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
            break;
        case kIndex_Type:
            bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
            break;
        case kUniform_Type:
            bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
            break;
        case kCopyRead_Type:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
            break;
        case kCopyWrite_Type:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
            break;
        case kTexel_Type:
            bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
            break;
    }
    if (!desc.fDynamic) {
        bufInfo.usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    }

    bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    bufInfo.queueFamilyIndexCount = 0;
    bufInfo.pQueueFamilyIndices = nullptr;

    VkResult err;
    err = VK_CALL(gpu, CreateBuffer(gpu->device(), &bufInfo, nullptr, &buffer));
    if (err) {
        return nullptr;
    }

    if (!GrVkMemory::AllocAndBindBufferMemory(gpu,
                                              buffer,
                                              desc.fType,
                                              desc.fDynamic,
                                              &alloc)) {
        // Don't leak the VkBuffer if we failed to back it with memory.
        VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
        return nullptr;
    }

    const GrVkBuffer::Resource* resource = new GrVkBuffer::Resource(buffer, alloc, desc.fType);
    if (!resource) {
        VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
        GrVkMemory::FreeBufferMemory(gpu, desc.fType, alloc);
        return nullptr;
    }

    return resource;
}

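// Records a VkBufferMemoryBarrier covering the entire buffer, synchronizing the given
// access masks between the given pipeline stages.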
void GrVkBuffer::addMemoryBarrier(const GrVkGpu* gpu,
                                  VkAccessFlags srcAccessMask,
                                  VkAccessFlags dstAccessMask,
                                  VkPipelineStageFlags srcStageMask,
                                  VkPipelineStageFlags dstStageMask,
                                  bool byRegion) const {
    VkBufferMemoryBarrier bufferMemoryBarrier = {
        VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // sType
        nullptr,                                 // pNext
        srcAccessMask,                           // srcAccessMask
        dstAccessMask,                           // dstAccessMask
        VK_QUEUE_FAMILY_IGNORED,                 // srcQueueFamilyIndex
        VK_QUEUE_FAMILY_IGNORED,                 // dstQueueFamilyIndex
        this->buffer(),                          // buffer
        0,                                       // offset
        fDesc.fSizeInBytes,                      // size
    };

    // TODO: restrict to area of buffer we're interested in
    gpu->addBufferMemoryBarrier(this->resource(), srcStageMask, dstStageMask, byRegion,
                                &bufferMemoryBarrier);
}

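// Releases the Vulkan objects owned by this Resource: destroys the VkBuffer and frees
// the memory bound to it.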
void GrVkBuffer::Resource::freeGPUData(GrVkGpu* gpu) const {
    SkASSERT(fBuffer);
    SkASSERT(fAlloc.fMemory);
    VK_CALL(gpu, DestroyBuffer(gpu->device(), fBuffer, nullptr));
    GrVkMemory::FreeBufferMemory(gpu, fType, fAlloc);
}

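// Hands the Resource back for recycling and drops the CPU-side copy kept for
// non-dynamic buffers.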
void GrVkBuffer::vkRelease(const GrVkGpu* gpu) {
    VALIDATE();
    fResource->recycle(const_cast<GrVkGpu*>(gpu));
    fResource = nullptr;
    if (!fDesc.fDynamic) {
        delete[] (unsigned char*)fMapPtr;
    }
    fMapPtr = nullptr;
    VALIDATE();
}

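// Drops the Resource via unrefAndAbandon, which skips freeing the underlying Vulkan
// objects, and frees the CPU-side copy kept for non-dynamic buffers.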
void GrVkBuffer::vkAbandon() {
    fResource->unrefAndAbandon();
    fResource = nullptr;
    if (!fDesc.fDynamic) {
        delete[] (unsigned char*)fMapPtr;
    }
    fMapPtr = nullptr;
    VALIDATE();
}

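// Returns the access flags with which the GPU reads a buffer of the given type. Only
// index and vertex buffers are expected here, since this helper is only used for static
// buffer updates.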
VkAccessFlags buffer_type_to_access_flags(GrVkBuffer::Type type) {
    switch (type) {
        case GrVkBuffer::kIndex_Type:
            return VK_ACCESS_INDEX_READ_BIT;
        case GrVkBuffer::kVertex_Type:
            return VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
        default:
            // This helper is only called for static buffers, so we should only ever see
            // index or vertex buffer types.
            SkASSERT(false);
            return 0;
    }
}

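// Prepares the buffer for CPU writes. A dynamic buffer maps its memory directly,
// recycling and replacing the Resource first if the GPU may still be using it; a
// non-dynamic buffer writes into a CPU-side array that is uploaded in internalUnmap.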
void GrVkBuffer::internalMap(GrVkGpu* gpu, size_t size, bool* createdNewBuffer) {
    VALIDATE();
    SkASSERT(!this->vkIsMapped());

    if (!fResource->unique()) {
        if (fDesc.fDynamic) {
            // in use by the command buffer, so we need to create a new one
            fResource->recycle(gpu);
            fResource = this->createResource(gpu, fDesc);
            if (createdNewBuffer) {
                *createdNewBuffer = true;
            }
        } else {
            SkASSERT(fMapPtr);
            this->addMemoryBarrier(gpu,
                                   buffer_type_to_access_flags(fDesc.fType),
                                   VK_ACCESS_TRANSFER_WRITE_BIT,
                                   VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                                   VK_PIPELINE_STAGE_TRANSFER_BIT,
                                   false);
        }
    }

    if (fDesc.fDynamic) {
        const GrVkAlloc& alloc = this->alloc();
        SkASSERT(alloc.fSize > 0);
        SkASSERT(alloc.fSize >= size);
        SkASSERT(0 == fOffset);

        fMapPtr = GrVkMemory::MapAlloc(gpu, alloc);
    } else {
        if (!fMapPtr) {
            fMapPtr = new unsigned char[this->size()];
        }
    }

    VALIDATE();
}

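// Uploads src into this buffer. Small, 4-byte-aligned updates go through
// vkCmdUpdateBuffer; anything else is staged through a transfer buffer and copied. A
// barrier is then added so later reads see the new contents.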
void GrVkBuffer::copyCpuDataToGpuBuffer(GrVkGpu* gpu, const void* src, size_t size) {
    SkASSERT(src);
    // The Vulkan API restricts the use of vkCmdUpdateBuffer to updates that are less than or
    // equal to 65536 bytes and have a size that is 4-byte aligned.
    if ((size <= 65536) && (0 == (size & 0x3)) && !gpu->vkCaps().avoidUpdateBuffers()) {
        gpu->updateBuffer(this, src, this->offset(), size);
    } else {
        sk_sp<GrVkTransferBuffer> transferBuffer =
                GrVkTransferBuffer::Make(gpu, size, GrVkBuffer::kCopyRead_Type);
        if (!transferBuffer) {
            return;
        }

        char* buffer = (char*) transferBuffer->map();
        memcpy(buffer, src, size);
        transferBuffer->unmap();

        gpu->copyBuffer(transferBuffer.get(), this, 0, this->offset(), size);
    }
    this->addMemoryBarrier(gpu,
                           VK_ACCESS_TRANSFER_WRITE_BIT,
                           buffer_type_to_access_flags(fDesc.fType),
                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                           VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                           false);
}

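// Ends a map started by internalMap. Dynamic buffers flush and unmap their memory;
// non-dynamic buffers upload the CPU-side copy that was written while mapped.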
void GrVkBuffer::internalUnmap(GrVkGpu* gpu, size_t size) {
    VALIDATE();
    SkASSERT(this->vkIsMapped());

    if (fDesc.fDynamic) {
        const GrVkAlloc& alloc = this->alloc();
        SkASSERT(alloc.fSize > 0);
        SkASSERT(alloc.fSize >= size);
        // We currently don't use fOffset
        SkASSERT(0 == fOffset);

        GrVkMemory::FlushMappedAlloc(gpu, alloc, 0, size);
        GrVkMemory::UnmapAlloc(gpu, alloc);
        fMapPtr = nullptr;
    } else {
        SkASSERT(fMapPtr);
        this->copyCpuDataToGpuBuffer(gpu, fMapPtr, size);
    }
}

bool GrVkBuffer::vkIsMapped() const {
    VALIDATE();
    return SkToBool(fMapPtr);
}

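// Replaces the first srcSizeInBytes bytes of the buffer with the contents of src.
// Returns false if the data would not fit in the buffer.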
bool GrVkBuffer::vkUpdateData(GrVkGpu* gpu, const void* src, size_t srcSizeInBytes,
                              bool* createdNewBuffer) {
    if (srcSizeInBytes > fDesc.fSizeInBytes) {
        return false;
    }

    if (fDesc.fDynamic) {
        this->internalMap(gpu, srcSizeInBytes, createdNewBuffer);
        if (!fMapPtr) {
            return false;
        }

        memcpy(fMapPtr, src, srcSizeInBytes);
        this->internalUnmap(gpu, srcSizeInBytes);
    } else {
        this->copyCpuDataToGpuBuffer(gpu, src, srcSizeInBytes);
    }

    return true;
}

void GrVkBuffer::validate() const {
    SkASSERT(!fResource || kVertex_Type == fDesc.fType || kIndex_Type == fDesc.fType
             || kTexel_Type == fDesc.fType || kCopyRead_Type == fDesc.fType
             || kCopyWrite_Type == fDesc.fType || kUniform_Type == fDesc.fType);
}