1 /* Copyright (c) 2015-2016 The Khronos Group Inc. 2 * Copyright (c) 2015-2016 Valve Corporation 3 * Copyright (c) 2015-2016 LunarG, Inc. 4 * Copyright (C) 2015-2016 Google Inc. 5 * 6 * Licensed under the Apache License, Version 2.0 (the "License"); 7 * you may not use this file except in compliance with the License. 8 * You may obtain a copy of the License at 9 * 10 * http://www.apache.org/licenses/LICENSE-2.0 11 * 12 * Unless required by applicable law or agreed to in writing, software 13 * distributed under the License is distributed on an "AS IS" BASIS, 14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 * See the License for the specific language governing permissions and 16 * limitations under the License. 17 * 18 * Author: Courtney Goeltzenleuchter <courtneygo (at) google.com> 19 * Author: Tobin Ehlis <tobine (at) google.com> 20 * Author: Chris Forbes <chrisf (at) ijw.co.nz> 21 * Author: Mark Lobodzinski <mark (at) lunarg.com> 22 */ 23 24 // Check for noexcept support 25 #if defined(__clang__) 26 #if __has_feature(cxx_noexcept) 27 #define HAS_NOEXCEPT 28 #endif 29 #else 30 #if defined(__GXX_EXPERIMENTAL_CXX0X__) && __GNUC__ * 10 + __GNUC_MINOR__ >= 46 31 #define HAS_NOEXCEPT 32 #else 33 #if defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023026 && defined(_HAS_EXCEPTIONS) && _HAS_EXCEPTIONS 34 #define HAS_NOEXCEPT 35 #endif 36 #endif 37 #endif 38 39 #ifdef HAS_NOEXCEPT 40 #define NOEXCEPT noexcept 41 #else 42 #define NOEXCEPT 43 #endif 44 45 // Enable mem_tracker merged code 46 #define MTMERGE 1 47 48 #pragma once 49 #include "core_validation_error_enums.h" 50 #include "core_validation_types.h" 51 #include "descriptor_sets.h" 52 #include "vk_layer_logging.h" 53 #include "vk_safe_struct.h" 54 #include "vulkan/vk_layer.h" 55 #include <atomic> 56 #include <functional> 57 #include <memory> 58 #include <unordered_map> 59 #include <unordered_set> 60 #include <vector> 61 #include <list> 62 63 #if MTMERGE 64 65 /* 66 * MTMTODO : Update 
this comment
 * Data Structure overview
 * There are 4 global STL maps
 *  cbMap -- map of command Buffer (CB) objects to MT_CB_INFO structures
 *    Each MT_CB_INFO struct has an stl list container with
 *    memory objects that are referenced by this CB
 *  memObjMap -- map of Memory Objects to MT_MEM_OBJ_INFO structures
 *    Each MT_MEM_OBJ_INFO has two stl list containers with:
 *      -- all CBs referencing this mem obj
 *      -- all VK Objects that are bound to this memory
 *  objectMap -- map of objects to MT_OBJ_INFO structures
 *
 * Algorithm overview
 * These are the primary events that should happen related to different objects
 * 1. Command buffers
 *    CREATION - Add object, structure to map
 *    CMD BIND - If mem associated, add mem reference to list container
 *    DESTROY  - Remove from map, decrement (and report) mem references
 * 2. Mem Objects
 *    CREATION - Add object, structure to map
 *    OBJ BIND - Add obj structure to list container for that mem node
 *    CB BIND  - If mem-related, add CB structure to list container for that mem node
 *    DESTROY  - Flag as errors any remaining refs and remove from map
 * 3. Generic Objects
 *    MEM BIND - DESTROY any previous binding, add obj node w/ ref to map, add obj ref to list container for that mem node
 *    DESTROY  - If mem bound, remove reference from list container for that memInfo, remove object ref from map
 */
// TODO : Is there a way to track when Cmd Buffer finishes & remove mem references at that point?
94 // TODO : Could potentially store a list of freed mem allocs to flag when they're incorrectly used 95 96 struct MT_FB_ATTACHMENT_INFO { 97 VkImage image; 98 VkDeviceMemory mem; 99 }; 100 101 struct MT_DESCRIPTOR_SET_INFO { 102 std::vector<VkImageView> images; 103 std::vector<VkBuffer> buffers; 104 }; 105 106 // Track Swapchain Information 107 struct MT_SWAP_CHAIN_INFO { 108 VkSwapchainCreateInfoKHR createInfo; 109 std::vector<VkImage> images; 110 }; 111 #endif 112 113 struct SHADER_DS_MAPPING { 114 uint32_t slotCount; 115 VkDescriptorSetLayoutCreateInfo *pShaderMappingSlot; 116 }; 117 118 struct GENERIC_HEADER { 119 VkStructureType sType; 120 const void *pNext; 121 }; 122 123 struct IMAGE_LAYOUT_NODE { 124 VkImageLayout layout; 125 VkFormat format; 126 }; 127 128 // Store layouts and pushconstants for PipelineLayout 129 struct PIPELINE_LAYOUT_NODE { 130 std::vector<VkDescriptorSetLayout> descriptorSetLayouts; 131 std::vector<cvdescriptorset::DescriptorSetLayout const *> setLayouts; 132 std::vector<VkPushConstantRange> pushConstantRanges; 133 }; 134 135 class PIPELINE_NODE { 136 public: 137 VkPipeline pipeline; 138 safe_VkGraphicsPipelineCreateInfo graphicsPipelineCI; 139 safe_VkComputePipelineCreateInfo computePipelineCI; 140 // Flag of which shader stages are active for this pipeline 141 uint32_t active_shaders; 142 uint32_t duplicate_shaders; 143 // Capture which slots (set#->bindings) are actually used by the shaders of this pipeline 144 std::unordered_map<uint32_t, std::unordered_set<uint32_t>> active_slots; 145 // Vtx input info (if any) 146 std::vector<VkVertexInputBindingDescription> vertexBindingDescriptions; 147 std::vector<VkVertexInputAttributeDescription> vertexAttributeDescriptions; 148 std::vector<VkPipelineColorBlendAttachmentState> attachments; 149 bool blendConstantsEnabled; // Blend constants enabled for any attachments 150 RENDER_PASS_NODE *renderPass; 151 PIPELINE_LAYOUT_NODE const *pipelineLayout; 152 153 // Default constructor 154 
PIPELINE_NODE() 155 : pipeline{}, graphicsPipelineCI{}, computePipelineCI{}, active_shaders(0), duplicate_shaders(0), active_slots(), vertexBindingDescriptions(), 156 vertexAttributeDescriptions(), attachments(), blendConstantsEnabled(false), renderPass(nullptr), pipelineLayout(nullptr) {} 157 158 void initGraphicsPipeline(const VkGraphicsPipelineCreateInfo *pCreateInfo) { 159 graphicsPipelineCI.initialize(pCreateInfo); 160 // Make sure compute pipeline is null 161 VkComputePipelineCreateInfo emptyComputeCI = {}; 162 computePipelineCI.initialize(&emptyComputeCI); 163 for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) { 164 const VkPipelineShaderStageCreateInfo *pPSSCI = &pCreateInfo->pStages[i]; 165 this->duplicate_shaders |= this->active_shaders & pPSSCI->stage; 166 this->active_shaders |= pPSSCI->stage; 167 } 168 if (pCreateInfo->pVertexInputState) { 169 const VkPipelineVertexInputStateCreateInfo *pVICI = pCreateInfo->pVertexInputState; 170 if (pVICI->vertexBindingDescriptionCount) { 171 this->vertexBindingDescriptions = std::vector<VkVertexInputBindingDescription>( 172 pVICI->pVertexBindingDescriptions, pVICI->pVertexBindingDescriptions + pVICI->vertexBindingDescriptionCount); 173 } 174 if (pVICI->vertexAttributeDescriptionCount) { 175 this->vertexAttributeDescriptions = std::vector<VkVertexInputAttributeDescription>( 176 pVICI->pVertexAttributeDescriptions, 177 pVICI->pVertexAttributeDescriptions + pVICI->vertexAttributeDescriptionCount); 178 } 179 } 180 if (pCreateInfo->pColorBlendState) { 181 const VkPipelineColorBlendStateCreateInfo *pCBCI = pCreateInfo->pColorBlendState; 182 if (pCBCI->attachmentCount) { 183 this->attachments = std::vector<VkPipelineColorBlendAttachmentState>(pCBCI->pAttachments, 184 pCBCI->pAttachments + pCBCI->attachmentCount); 185 } 186 } 187 } 188 void initComputePipeline(const VkComputePipelineCreateInfo *pCreateInfo) { 189 computePipelineCI.initialize(pCreateInfo); 190 // Make sure gfx pipeline is null 191 
VkGraphicsPipelineCreateInfo emptyGraphicsCI = {}; 192 graphicsPipelineCI.initialize(&emptyGraphicsCI); 193 switch (computePipelineCI.stage.stage) { 194 case VK_SHADER_STAGE_COMPUTE_BIT: 195 this->active_shaders |= VK_SHADER_STAGE_COMPUTE_BIT; 196 break; 197 default: 198 // TODO : Flag error 199 break; 200 } 201 } 202 }; 203 204 class PHYS_DEV_PROPERTIES_NODE { 205 public: 206 VkPhysicalDeviceProperties properties; 207 VkPhysicalDeviceFeatures features; 208 std::vector<VkQueueFamilyProperties> queue_family_properties; 209 }; 210 211 class FENCE_NODE : public BASE_NODE { 212 public: 213 using BASE_NODE::in_use; 214 215 VkSwapchainKHR swapchain; // Swapchain that this fence is submitted against or NULL 216 bool firstTimeFlag; // Fence was created in signaled state, avoid warnings for first use 217 VkFenceCreateInfo createInfo; 218 std::unordered_set<VkQueue> queues; 219 std::vector<VkCommandBuffer> cmdBuffers; 220 bool needsSignaled; 221 std::vector<VkFence> priorFences; 222 223 // Default constructor 224 FENCE_NODE() : swapchain(VK_NULL_HANDLE), firstTimeFlag(false), needsSignaled(false){}; 225 }; 226 227 class SEMAPHORE_NODE : public BASE_NODE { 228 public: 229 using BASE_NODE::in_use; 230 bool signaled; 231 VkQueue queue; 232 }; 233 234 class EVENT_NODE : public BASE_NODE { 235 public: 236 using BASE_NODE::in_use; 237 int write_in_use; 238 bool needsSignaled; 239 VkPipelineStageFlags stageMask; 240 }; 241 242 class QUEUE_NODE { 243 public: 244 VkDevice device; 245 std::vector<VkFence> lastFences; 246 #if MTMERGE 247 // MTMTODO : merge cmd_buffer data structs here 248 std::list<VkCommandBuffer> pQueueCommandBuffers; 249 std::list<VkDeviceMemory> pMemRefList; 250 #endif 251 std::vector<VkCommandBuffer> untrackedCmdBuffers; 252 std::unordered_map<VkEvent, VkPipelineStageFlags> eventToStageMap; 253 std::unordered_map<QueryObject, bool> queryToStateMap; // 0 is unavailable, 1 is available 254 }; 255 256 class QUERY_POOL_NODE : public BASE_NODE { 257 public: 258 
VkQueryPoolCreateInfo createInfo; 259 }; 260 261 class FRAMEBUFFER_NODE { 262 public: 263 VkFramebufferCreateInfo createInfo; 264 std::unordered_set<VkCommandBuffer> referencingCmdBuffers; 265 std::vector<MT_FB_ATTACHMENT_INFO> attachments; 266 }; 267 268 struct DESCRIPTOR_POOL_NODE { 269 VkDescriptorPool pool; 270 uint32_t maxSets; // Max descriptor sets allowed in this pool 271 uint32_t availableSets; // Available descriptor sets in this pool 272 273 VkDescriptorPoolCreateInfo createInfo; 274 std::unordered_set<cvdescriptorset::DescriptorSet *> sets; // Collection of all sets in this pool 275 std::vector<uint32_t> maxDescriptorTypeCount; // Max # of descriptors of each type in this pool 276 std::vector<uint32_t> availableDescriptorTypeCount; // Available # of descriptors of each type in this pool 277 278 DESCRIPTOR_POOL_NODE(const VkDescriptorPool pool, const VkDescriptorPoolCreateInfo *pCreateInfo) 279 : pool(pool), maxSets(pCreateInfo->maxSets), availableSets(pCreateInfo->maxSets), createInfo(*pCreateInfo), 280 maxDescriptorTypeCount(VK_DESCRIPTOR_TYPE_RANGE_SIZE, 0), availableDescriptorTypeCount(VK_DESCRIPTOR_TYPE_RANGE_SIZE, 0) { 281 if (createInfo.poolSizeCount) { // Shadow type struct from ptr into local struct 282 size_t poolSizeCountSize = createInfo.poolSizeCount * sizeof(VkDescriptorPoolSize); 283 createInfo.pPoolSizes = new VkDescriptorPoolSize[poolSizeCountSize]; 284 memcpy((void *)createInfo.pPoolSizes, pCreateInfo->pPoolSizes, poolSizeCountSize); 285 // Now set max counts for each descriptor type based on count of that type times maxSets 286 uint32_t i = 0; 287 for (i = 0; i < createInfo.poolSizeCount; ++i) { 288 uint32_t typeIndex = static_cast<uint32_t>(createInfo.pPoolSizes[i].type); 289 // Same descriptor types can appear several times 290 maxDescriptorTypeCount[typeIndex] += createInfo.pPoolSizes[i].descriptorCount; 291 availableDescriptorTypeCount[typeIndex] = maxDescriptorTypeCount[typeIndex]; 292 } 293 } else { 294 createInfo.pPoolSizes = 
NULL; // Make sure this is NULL so we don't try to clean it up 295 } 296 } 297 ~DESCRIPTOR_POOL_NODE() { 298 delete[] createInfo.pPoolSizes; 299 // TODO : pSets are currently freed in deletePools function which uses freeShadowUpdateTree function 300 // need to migrate that struct to smart ptrs for auto-cleanup 301 } 302 }; 303 304 typedef struct stencil_data { 305 uint32_t compareMask; 306 uint32_t writeMask; 307 uint32_t reference; 308 } CBStencilData; 309