/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkGpu.h"
#include "GrVkImage.h"
#include "GrVkMemory.h"
#include "GrVkUtil.h"

#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)

VkImageAspectFlags vk_format_to_aspect_flags(VkFormat format) {
    switch (format) {
        case VK_FORMAT_S8_UINT:
            return VK_IMAGE_ASPECT_STENCIL_BIT;
        case VK_FORMAT_D24_UNORM_S8_UINT: // fallthrough
        case VK_FORMAT_D32_SFLOAT_S8_UINT:
            return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
        default:
            SkASSERT(GrVkFormatIsSupported(format));
            return VK_IMAGE_ASPECT_COLOR_BIT;
    }
}

void GrVkImage::setImageLayout(const GrVkGpu* gpu, VkImageLayout newLayout,
                               VkAccessFlags dstAccessMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion) {
    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED != newLayout &&
             VK_IMAGE_LAYOUT_PREINITIALIZED != newLayout);
    VkImageLayout currentLayout = this->currentLayout();

    // If the old and new layouts are the same and the layout is a read-only layout, there is no
    // need to put in a barrier.
    if (newLayout == currentLayout &&
        (VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == currentLayout)) {
        return;
    }

    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(currentLayout);
    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(currentLayout);

    VkImageAspectFlags aspectFlags = vk_format_to_aspect_flags(fInfo.fFormat);
    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,      // sType
        nullptr,                                     // pNext
        srcAccessMask,                               // srcAccessMask
        dstAccessMask,                               // dstAccessMask
        currentLayout,                               // oldLayout
        newLayout,                                   // newLayout
        VK_QUEUE_FAMILY_IGNORED,                     // srcQueueFamilyIndex
        VK_QUEUE_FAMILY_IGNORED,                     // dstQueueFamilyIndex
        fInfo.fImage,                                // image
        { aspectFlags, 0, fInfo.fLevelCount, 0, 1 }  // subresourceRange
    };

    gpu->addImageMemoryBarrier(srcStageMask, dstStageMask, byRegion, &imageMemoryBarrier);

    fInfo.fImageLayout = newLayout;
}

bool GrVkImage::InitImageInfo(const GrVkGpu* gpu, const ImageDesc& imageDesc, GrVkImageInfo* info) {
    if (0 == imageDesc.fWidth || 0 == imageDesc.fHeight) {
        return false;
    }
    VkImage image = 0;
    GrVkAlloc alloc;

    bool isLinear = VK_IMAGE_TILING_LINEAR == imageDesc.fImageTiling;
    VkImageLayout initialLayout = isLinear ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                           : VK_IMAGE_LAYOUT_UNDEFINED;

    // Create Image
    VkSampleCountFlagBits vkSamples;
    if (!GrSampleCountToVkSampleCount(imageDesc.fSamples, &vkSamples)) {
        return false;
    }

    SkASSERT(VK_IMAGE_TILING_OPTIMAL == imageDesc.fImageTiling ||
             VK_SAMPLE_COUNT_1_BIT == vkSamples);

    // sRGB format images may need to be aliased to linear for various reasons (legacy mode):
    VkImageCreateFlags createFlags = GrVkFormatIsSRGB(imageDesc.fFormat, nullptr)
        ? VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT : 0;

    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,         // sType
        nullptr,                                     // pNext
        createFlags,                                 // VkImageCreateFlags
        imageDesc.fImageType,                        // VkImageType
        imageDesc.fFormat,                           // VkFormat
        { imageDesc.fWidth, imageDesc.fHeight, 1 },  // VkExtent3D
        imageDesc.fLevels,                           // mipLevels
        1,                                           // arrayLayers
        vkSamples,                                   // samples
        imageDesc.fImageTiling,                      // VkImageTiling
        imageDesc.fUsageFlags,                       // VkImageUsageFlags
        VK_SHARING_MODE_EXCLUSIVE,                   // VkSharingMode
        0,                                           // queueFamilyIndexCount
        0,                                           // pQueueFamilyIndices
        initialLayout                                // initialLayout
    };

    GR_VK_CALL_ERRCHECK(gpu->vkInterface(), CreateImage(gpu->device(), &imageCreateInfo, nullptr,
                                                        &image));

    if (!GrVkMemory::AllocAndBindImageMemory(gpu, image, isLinear, &alloc)) {
        VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
        return false;
    }

    info->fImage = image;
    info->fAlloc = alloc;
    info->fImageTiling = imageDesc.fImageTiling;
    info->fImageLayout = initialLayout;
    info->fFormat = imageDesc.fFormat;
    info->fLevelCount = imageDesc.fLevels;
    return true;
}

void GrVkImage::DestroyImageInfo(const GrVkGpu* gpu, GrVkImageInfo* info) {
    VK_CALL(gpu, DestroyImage(gpu->device(), info->fImage, nullptr));
    bool isLinear = VK_IMAGE_TILING_LINEAR == info->fImageTiling;
    GrVkMemory::FreeImageMemory(gpu, isLinear, info->fAlloc);
}

void GrVkImage::setNewResource(VkImage image, const GrVkAlloc& alloc, VkImageTiling tiling) {
    fResource = new Resource(image, alloc, tiling);
}

GrVkImage::~GrVkImage() {
    // should have been released or abandoned first
    SkASSERT(!fResource);
}

void GrVkImage::releaseImage(const GrVkGpu* gpu) {
    if (fResource) {
        fResource->unref(gpu);
        fResource = nullptr;
    }
}

void GrVkImage::abandonImage() {
    if (fResource) {
        fResource->unrefAndAbandon();
        fResource = nullptr;
    }
}

void GrVkImage::setResourceRelease(sk_sp<GrReleaseProcHelper> releaseHelper) {
    // Forward the release proc on to GrVkImage::Resource
    fResource->setRelease(std::move(releaseHelper));
}

void GrVkImage::Resource::freeGPUData(const GrVkGpu* gpu) const {
    SkASSERT(!fReleaseHelper);
    VK_CALL(gpu, DestroyImage(gpu->device(), fImage, nullptr));
    bool isLinear = (VK_IMAGE_TILING_LINEAR == fImageTiling);
    GrVkMemory::FreeImageMemory(gpu, isLinear, fAlloc);
}

void GrVkImage::BorrowedResource::freeGPUData(const GrVkGpu* gpu) const {
    this->invokeReleaseProc();
}

void GrVkImage::BorrowedResource::abandonGPUData() const {
    this->invokeReleaseProc();
}