/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkBuffer.h"
#include "GrVkGpu.h"
#include "GrVkMemory.h"
#include "GrVkUtil.h"

#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)

#ifdef SK_DEBUG
#define VALIDATE() this->validate()
#else
#define VALIDATE() do {} while(false)
#endif

const GrVkBuffer::Resource* GrVkBuffer::Create(const GrVkGpu* gpu, const Desc& desc) {
    VkBuffer buffer;
    VkDeviceMemory alloc;

    // create the buffer object
    VkBufferCreateInfo bufInfo;
    memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
    bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bufInfo.flags = 0;
    bufInfo.size = desc.fSizeInBytes;
    switch (desc.fType) {
        case kVertex_Type:
            bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
            break;
        case kIndex_Type:
            bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
            break;
        case kUniform_Type:
            bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
            break;
        case kCopyRead_Type:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
            break;
        case kCopyWrite_Type:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
            break;
    }
    bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    bufInfo.queueFamilyIndexCount = 0;
    bufInfo.pQueueFamilyIndices = nullptr;

    VkResult err;
    err = VK_CALL(gpu, CreateBuffer(gpu->device(), &bufInfo, nullptr, &buffer));
    if (err) {
        return nullptr;
    }

    // Request host-visible, host-coherent memory so the buffer can be mapped and
    // written by the CPU without explicit flushes.
    VkMemoryPropertyFlags requiredMemProps = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                             VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;

    if (!GrVkMemory::AllocAndBindBufferMemory(gpu,
                                              buffer,
                                              requiredMemProps,
                                              &alloc)) {
        VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
        return nullptr;
    }

    const GrVkBuffer::Resource* resource = new GrVkBuffer::Resource(buffer, alloc);
    if (!resource) {
        VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
        VK_CALL(gpu, FreeMemory(gpu->device(), alloc, nullptr));
        return nullptr;
    }

    return resource;
}

void GrVkBuffer::addMemoryBarrier(const GrVkGpu* gpu,
                                  VkAccessFlags srcAccessMask,
                                  VkAccessFlags dstAccessMask,
                                  VkPipelineStageFlags srcStageMask,
                                  VkPipelineStageFlags dstStageMask,
                                  bool byRegion) const {
    VkBufferMemoryBarrier bufferMemoryBarrier = {
        VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // sType
        NULL,                                    // pNext
        srcAccessMask,                           // srcAccessMask
        dstAccessMask,                           // dstAccessMask
        VK_QUEUE_FAMILY_IGNORED,                 // srcQueueFamilyIndex
        VK_QUEUE_FAMILY_IGNORED,                 // dstQueueFamilyIndex
        this->buffer(),                          // buffer
        0,                                       // offset
        fDesc.fSizeInBytes,                      // size
    };

    // TODO: restrict to area of buffer we're interested in
    gpu->addBufferMemoryBarrier(srcStageMask, dstStageMask, byRegion, &bufferMemoryBarrier);
}

void GrVkBuffer::Resource::freeGPUData(const GrVkGpu* gpu) const {
    SkASSERT(fBuffer);
    SkASSERT(fAlloc);
    VK_CALL(gpu, DestroyBuffer(gpu->device(), fBuffer, nullptr));
    VK_CALL(gpu, FreeMemory(gpu->device(), fAlloc, nullptr));
}

void GrVkBuffer::vkRelease(const GrVkGpu* gpu) {
    VALIDATE();
    fResource->unref(gpu);
    fResource = nullptr;
    fMapPtr = nullptr;
    VALIDATE();
}

void GrVkBuffer::vkAbandon() {
    fResource->unrefAndAbandon();
    fMapPtr = nullptr;
    VALIDATE();
}

void* GrVkBuffer::vkMap(const GrVkGpu* gpu) {
    VALIDATE();
    SkASSERT(!this->vkIsMapped());

    // Map the whole allocation; fMapPtr stays null if the map fails.
    VkResult err = VK_CALL(gpu, MapMemory(gpu->device(), alloc(), 0, VK_WHOLE_SIZE, 0, &fMapPtr));
    if (err) {
        fMapPtr = nullptr;
    }

    VALIDATE();
    return fMapPtr;
}

void GrVkBuffer::vkUnmap(const GrVkGpu* gpu) {
    VALIDATE();
    SkASSERT(this->vkIsMapped());

    VK_CALL(gpu, UnmapMemory(gpu->device(), alloc()));

    fMapPtr = nullptr;
}

bool GrVkBuffer::vkIsMapped() const {
    VALIDATE();
    return SkToBool(fMapPtr);
}

bool GrVkBuffer::vkUpdateData(const GrVkGpu* gpu, const void* src, size_t srcSizeInBytes) {
    SkASSERT(!this->vkIsMapped());
    VALIDATE();
    if (srcSizeInBytes > fDesc.fSizeInBytes) {
        return false;
    }

    void* mapPtr;
    VkResult err = VK_CALL(gpu, MapMemory(gpu->device(), alloc(), 0, srcSizeInBytes, 0, &mapPtr));

    if (VK_SUCCESS != err) {
        return false;
    }

    memcpy(mapPtr, src, srcSizeInBytes);

    VK_CALL(gpu, UnmapMemory(gpu->device(), alloc()));

    return true;
}

void GrVkBuffer::validate() const {
    SkASSERT(!fResource || kVertex_Type == fDesc.fType || kIndex_Type == fDesc.fType
             || kCopyRead_Type == fDesc.fType || kCopyWrite_Type == fDesc.fType
             || kUniform_Type == fDesc.fType);
}
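
// Usage sketch (not part of the original file): one way a caller might stream
// vertex data into one of these buffers. "gpu", "buffer", and "uploadVertexData"
// are hypothetical names; "buffer" is assumed to be a kVertex_Type GrVkBuffer
// whose fDesc.fSizeInBytes is at least "size". Because Create() requests
// HOST_VISIBLE | HOST_COHERENT memory, writes through the mapping need no
// explicit flush before the GPU reads them.
//
//   bool uploadVertexData(const GrVkGpu* gpu, GrVkBuffer* buffer,
//                         const void* data, size_t size) {
//       if (void* dst = buffer->vkMap(gpu)) {
//           memcpy(dst, data, size);   // write straight into the mapped allocation
//           buffer->vkUnmap(gpu);
//           return true;
//       }
//       // Fall back to the map/copy/unmap helper if mapping failed.
//       return buffer->vkUpdateData(gpu, data, size);
//   }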