/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and/or associated documentation files (the "Materials"), to
 * deal in the Materials without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Materials, and to permit persons to whom the Materials
 * are furnished to do so, subject to the following conditions:
 *
 * The above copyright notice(s) and this permission notice shall be included
 * in all copies or substantial portions of the Materials.
 *
 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 *
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE
 * USE OR OTHER DEALINGS IN THE MATERIALS.
 *
 * Author: Jon Ashburn <jon@lunarg.com>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Tobin Ehlis <tobin@lunarg.com>
 */

#include <cassert>
#include <cinttypes>
#include <cstdlib>
#include <cstring>
#include <new>
#include <unordered_map>
#include <vector>

#include "vulkan/vk_layer.h"
#include "vk_layer_extension_utils.h"
#include "vk_enum_string_helper.h"
#include "vk_layer_table.h"
#include "vk_layer_utils.h"

using std::unordered_map;

// Object Tracker ERROR codes
typedef enum _OBJECT_TRACK_ERROR {
    OBJTRACK_NONE,                     // Used for INFO & other non-error messages
    OBJTRACK_UNKNOWN_OBJECT,           // Updating uses of object that's not in global object list
    OBJTRACK_INTERNAL_ERROR,           // Bug with data tracking within the layer
    OBJTRACK_DESTROY_OBJECT_FAILED,    // Couldn't find object to be destroyed
    OBJTRACK_OBJECT_LEAK,              // Object was not correctly freed/destroyed
    OBJTRACK_OBJCOUNT_MAX_EXCEEDED,    // Request for object data in excess of max object count
    OBJTRACK_INVALID_OBJECT,           // Object used that has never been created
    OBJTRACK_DESCRIPTOR_POOL_MISMATCH, // Descriptor Pools specified incorrectly
    OBJTRACK_COMMAND_POOL_MISMATCH,    // Command Pools specified incorrectly
} OBJECT_TRACK_ERROR;

// Object Status -- used to track state of individual objects
typedef VkFlags ObjectStatusFlags;
typedef enum _ObjectStatusFlagBits {
    OBJSTATUS_NONE = 0x00000000,                     // No status is set
    OBJSTATUS_FENCE_IS_SUBMITTED = 0x00000001,       // Fence has been submitted
    OBJSTATUS_VIEWPORT_BOUND = 0x00000002,           // Viewport state object has been bound
    OBJSTATUS_RASTER_BOUND = 0x00000004,             // Raster state object has been bound
    OBJSTATUS_COLOR_BLEND_BOUND = 0x00000008,        // Color blend state object has been bound
    OBJSTATUS_DEPTH_STENCIL_BOUND = 0x00000010,      // Depth-stencil state object has been bound
    OBJSTATUS_GPU_MEM_MAPPED = 0x00000020,           // Memory object is currently mapped
    OBJSTATUS_COMMAND_BUFFER_SECONDARY = 0x00000040, // Command Buffer is of type SECONDARY
} ObjectStatusFlagBits;

typedef struct _OBJTRACK_NODE {
    uint64_t vkObj;                     // Object handle
    VkDebugReportObjectTypeEXT objType; // Object type identifier
    ObjectStatusFlags status;           // Object state
    uint64_t parentObj;                 // Parent object
    uint64_t belongsTo;                 // Object scope -- owning device/instance
} OBJTRACK_NODE;
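// Status bits are OR'd together on a node's status field. A minimal sketch of
// setting and testing a flag (illustrative only -- it assumes an already
// tracked handle and a VkDeviceMemoryMap like the per-type maps declared
// further below):
#if 0
OBJTRACK_NODE *pNode = VkDeviceMemoryMap[handle];
pNode->status |= OBJSTATUS_GPU_MEM_MAPPED;                     // vkMapMemory seen
bool mapped = (pNode->status & OBJSTATUS_GPU_MEM_MAPPED) != 0; // query state
pNode->status &= ~OBJSTATUS_GPU_MEM_MAPPED;                    // vkUnmapMemory seen
#endif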
// Prototypes for extension functions
uint64_t objTrackGetObjectCount(VkDevice device);
uint64_t objTrackGetObjectsOfTypeCount(VkDevice, VkDebugReportObjectTypeEXT type);

// Func ptr typedefs
typedef uint64_t (*OBJ_TRACK_GET_OBJECT_COUNT)(VkDevice);
typedef uint64_t (*OBJ_TRACK_GET_OBJECTS_OF_TYPE_COUNT)(VkDevice, VkDebugReportObjectTypeEXT);

struct layer_data {
    debug_report_data *report_data;
    // TODO: put instance data here
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    bool wsi_enabled;
    bool objtrack_extensions_enabled;

    layer_data() : report_data(nullptr), wsi_enabled(false), objtrack_extensions_enabled(false) {}
};

struct instExts {
    bool wsi_enabled;
};

static std::unordered_map<void *, struct instExts> instanceExtMap;
static std::unordered_map<void *, layer_data *> layer_data_map;
static device_table_map object_tracker_device_table_map;
static instance_table_map object_tracker_instance_table_map;

// We additionally need to validate image usage using a separate map
// of swapchain-created images
static unordered_map<uint64_t, OBJTRACK_NODE *> swapchainImageMap;

static long long unsigned int object_track_index = 0;
static int objLockInitialized = 0;
static loader_platform_thread_mutex objLock;

// Objects stored in a global map w/ struct containing basic info
// unordered_map<const void*, OBJTRACK_NODE*> objMap;

#define NUM_OBJECT_TYPES (VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT + 1)

static uint64_t numObjs[NUM_OBJECT_TYPES] = {0};
static uint64_t numTotalObjs = 0;
static VkQueueFamilyProperties *queueInfo = NULL;
static uint32_t queueCount = 0;

template layer_data *get_my_data_ptr<layer_data>(void *data_key, std::unordered_map<void *, layer_data *> &data_map);

//
// Internal Object Tracker Functions
//

static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkLayerDispatchTable *pDisp = get_dispatch_table(object_tracker_device_table_map, device);
    PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
    pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
    pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
    pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
    pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
    pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
    my_device_data->wsi_enabled = false;
    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
            my_device_data->wsi_enabled = true;

        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], "OBJTRACK_EXTENSIONS") == 0)
            my_device_data->objtrack_extensions_enabled = true;
    }
}
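// An application would reach the two objTrack* entry points declared above via
// vkGetDeviceProcAddr once the "OBJTRACK_EXTENSIONS" string has been passed at
// device creation. A minimal application-side sketch (hypothetical usage, not
// part of this layer):
#if 0
OBJ_TRACK_GET_OBJECT_COUNT pfnGetObjectCount =
    (OBJ_TRACK_GET_OBJECT_COUNT)vkGetDeviceProcAddr(device, "objTrackGetObjectCount");
if (pfnGetObjectCount) {
    uint64_t liveObjects = pfnGetObjectCount(device);
}
#endif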
static void createInstanceRegisterExtensions(const VkInstanceCreateInfo *pCreateInfo, VkInstance instance) {
    uint32_t i;
    VkLayerInstanceDispatchTable *pDisp = get_dispatch_table(object_tracker_instance_table_map, instance);
    PFN_vkGetInstanceProcAddr gpa = pDisp->GetInstanceProcAddr;

    pDisp->DestroySurfaceKHR = (PFN_vkDestroySurfaceKHR)gpa(instance, "vkDestroySurfaceKHR");
    pDisp->GetPhysicalDeviceSurfaceSupportKHR =
        (PFN_vkGetPhysicalDeviceSurfaceSupportKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceSupportKHR");
    pDisp->GetPhysicalDeviceSurfaceCapabilitiesKHR =
        (PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR");
    pDisp->GetPhysicalDeviceSurfaceFormatsKHR =
        (PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceFormatsKHR");
    pDisp->GetPhysicalDeviceSurfacePresentModesKHR =
        (PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)gpa(instance, "vkGetPhysicalDeviceSurfacePresentModesKHR");

#ifdef VK_USE_PLATFORM_WIN32_KHR
    pDisp->CreateWin32SurfaceKHR = (PFN_vkCreateWin32SurfaceKHR)gpa(instance, "vkCreateWin32SurfaceKHR");
    pDisp->GetPhysicalDeviceWin32PresentationSupportKHR =
        (PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceWin32PresentationSupportKHR");
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
    pDisp->CreateXcbSurfaceKHR = (PFN_vkCreateXcbSurfaceKHR)gpa(instance, "vkCreateXcbSurfaceKHR");
    pDisp->GetPhysicalDeviceXcbPresentationSupportKHR =
        (PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceXcbPresentationSupportKHR");
#endif // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
    pDisp->CreateXlibSurfaceKHR = (PFN_vkCreateXlibSurfaceKHR)gpa(instance, "vkCreateXlibSurfaceKHR");
    pDisp->GetPhysicalDeviceXlibPresentationSupportKHR =
        (PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceXlibPresentationSupportKHR");
#endif // VK_USE_PLATFORM_XLIB_KHR
#ifdef VK_USE_PLATFORM_MIR_KHR
    pDisp->CreateMirSurfaceKHR = (PFN_vkCreateMirSurfaceKHR)gpa(instance, "vkCreateMirSurfaceKHR");
    pDisp->GetPhysicalDeviceMirPresentationSupportKHR =
        (PFN_vkGetPhysicalDeviceMirPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceMirPresentationSupportKHR");
#endif // VK_USE_PLATFORM_MIR_KHR
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
    pDisp->CreateWaylandSurfaceKHR = (PFN_vkCreateWaylandSurfaceKHR)gpa(instance, "vkCreateWaylandSurfaceKHR");
    pDisp->GetPhysicalDeviceWaylandPresentationSupportKHR =
        (PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceWaylandPresentationSupportKHR");
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    pDisp->CreateAndroidSurfaceKHR = (PFN_vkCreateAndroidSurfaceKHR)gpa(instance, "vkCreateAndroidSurfaceKHR");
#endif // VK_USE_PLATFORM_ANDROID_KHR

    instanceExtMap[pDisp].wsi_enabled = false;
    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SURFACE_EXTENSION_NAME) == 0)
            instanceExtMap[pDisp].wsi_enabled = true;
    }
}

// Indicate device or instance dispatch table type
typedef enum _DispTableType {
    DISP_TBL_TYPE_INSTANCE,
    DISP_TBL_TYPE_DEVICE,
} DispTableType;

debug_report_data *mdd(const void *object) {
    dispatch_key key = get_dispatch_key(object);
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    return my_data->report_data;
}

debug_report_data *mid(VkInstance object) {
    dispatch_key key = get_dispatch_key(object);
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    return my_data->report_data;
}
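// Both helpers above resolve a dispatchable handle to this layer's per-key
// data: the dispatch key is the loader's dispatch table pointer stored in the
// first word of every dispatchable object, so a device and the queues and
// command buffers created from it all share one layer_data entry. Sketch:
#if 0
assert(mdd(queue) == mdd(device)); // assumes queue was retrieved from device
#endif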
// For each Queue's doubly linked-list of mem refs
typedef struct _OT_MEM_INFO {
    VkDeviceMemory mem;
    struct _OT_MEM_INFO *pNextMI;
    struct _OT_MEM_INFO *pPrevMI;
} OT_MEM_INFO;

// Track Queue information
typedef struct _OT_QUEUE_INFO {
    OT_MEM_INFO *pMemRefList;
    struct _OT_QUEUE_INFO *pNextQI;
    uint32_t queueNodeIndex;
    VkQueue queue;
    uint32_t refCount;
} OT_QUEUE_INFO;

// Global list of QueueInfo structures, one per queue
static OT_QUEUE_INFO *g_pQueueInfo = NULL;

// Convert an object type enum to an object type array index
static uint32_t objTypeToIndex(uint32_t objType) {
    uint32_t index = objType;
    return index;
}

// Add new queue to head of global queue list
static void addQueueInfo(uint32_t queueNodeIndex, VkQueue queue) {
    // Use the non-throwing form of new so an allocation failure can be reported
    // through the debug report path instead of an uncaught exception.
    OT_QUEUE_INFO *pQueueInfo = new (std::nothrow) OT_QUEUE_INFO;

    if (pQueueInfo != NULL) {
        memset(pQueueInfo, 0, sizeof(OT_QUEUE_INFO));
        pQueueInfo->queue = queue;
        pQueueInfo->queueNodeIndex = queueNodeIndex;
        pQueueInfo->pNextQI = g_pQueueInfo;
        g_pQueueInfo = pQueueInfo;
    } else {
        log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, reinterpret_cast<uint64_t>(queue),
                __LINE__, OBJTRACK_INTERNAL_ERROR, "OBJTRACK",
                "ERROR: VK_ERROR_OUT_OF_HOST_MEMORY -- could not allocate memory for Queue Information");
    }
}

// Destroy memRef lists and free all memory
static void destroyQueueMemRefLists(void) {
    OT_QUEUE_INFO *pQueueInfo = g_pQueueInfo;
    OT_QUEUE_INFO *pDelQueueInfo = NULL;
    while (pQueueInfo != NULL) {
        OT_MEM_INFO *pMemInfo = pQueueInfo->pMemRefList;
        while (pMemInfo != NULL) {
            OT_MEM_INFO *pDelMemInfo = pMemInfo;
            pMemInfo = pMemInfo->pNextMI;
            delete pDelMemInfo;
        }
        pDelQueueInfo = pQueueInfo;
        pQueueInfo = pQueueInfo->pNextQI;
        delete pDelQueueInfo;
    }
    g_pQueueInfo = pQueueInfo;
}

static void setGpuQueueInfoState(uint32_t count, void *pData) {
    queueCount = count;
    // Assign realloc's result to a temporary so the original block is not
    // leaked if the reallocation fails.
    VkQueueFamilyProperties *pNewQueueInfo =
        (VkQueueFamilyProperties *)realloc((void *)queueInfo, count * sizeof(VkQueueFamilyProperties));
    if (pNewQueueInfo != NULL) {
        queueInfo = pNewQueueInfo;
        memcpy(queueInfo, pData, count * sizeof(VkQueueFamilyProperties));
    }
}

// Check Queue type flags for selected queue operations
static void validateQueueFlags(VkQueue queue, const char *function) {
    OT_QUEUE_INFO *pQueueInfo = g_pQueueInfo;
    while ((pQueueInfo != NULL) && (pQueueInfo->queue != queue)) {
        pQueueInfo = pQueueInfo->pNextQI;
    }
    if (pQueueInfo != NULL) {
        if ((queueInfo != NULL) && (queueInfo[pQueueInfo->queueNodeIndex].queueFlags & VK_QUEUE_SPARSE_BINDING_BIT) == 0) {
            log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                    reinterpret_cast<uint64_t>(queue), __LINE__, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
                    "Attempting %s on a non-memory-management capable queue -- VK_QUEUE_SPARSE_BINDING_BIT not set", function);
        }
    }
}
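// Callers are expected to hold objLock around the helpers above; for example,
// explicit_QueueBindSparse below wraps its check like so:
#if 0
loader_platform_thread_lock_mutex(&objLock);
validateQueueFlags(queue, "QueueBindSparse");
loader_platform_thread_unlock_mutex(&objLock);
#endif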
/* TODO: Port to new type safety */
#if 0
// Check object status for selected flag state
static VkBool32
validate_status(
    VkObject dispatchable_object,
    VkObject vkObj,
    VkObjectType objType,
    ObjectStatusFlags status_mask,
    ObjectStatusFlags status_flag,
    VkFlags msg_flags,
    OBJECT_TRACK_ERROR error_code,
    const char *fail_msg)
{
    if (objMap.find(vkObj) != objMap.end()) {
        OBJTRACK_NODE* pNode = objMap[vkObj];
        if ((pNode->status & status_mask) != status_flag) {
            char str[1024];
            log_msg(mdd(dispatchable_object), msg_flags, pNode->objType, vkObj, __LINE__, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
                    "OBJECT VALIDATION WARNING: %s object 0x%" PRIxLEAST64 ": %s", string_VkObjectType(objType),
                    static_cast<uint64_t>(vkObj), fail_msg);
            return VK_FALSE;
        }
        return VK_TRUE;
    }
    else {
        // If we do not find it print an error
        log_msg(mdd(dispatchable_object), msg_flags, (VkObjectType) 0, vkObj, __LINE__, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
                "Unable to obtain status for non-existent object 0x%" PRIxLEAST64 " of %s type",
                static_cast<uint64_t>(vkObj), string_VkObjectType(objType));
        return VK_FALSE;
    }
}
#endif

#include "vk_dispatch_table_helper.h"

static void init_object_tracker(layer_data *my_data, const VkAllocationCallbacks *pAllocator) {

    layer_debug_actions(my_data->report_data, my_data->logging_callback, pAllocator, "lunarg_object_tracker");

    if (!objLockInitialized) {
        // TODO/TBD: Need to delete this mutex sometime. How??? One
        // suggestion is to call this during vkCreateInstance(), and then we
        // can clean it up during vkDestroyInstance(). However, that requires
        // that the layer have per-instance locks. We need to come back and
        // address this soon.
        loader_platform_thread_create_mutex(&objLock);
        objLockInitialized = 1;
    }
}
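// objLock serializes all access to the global tracking state (the per-type
// maps, the object counters, and the queue list). The explicit_* intercepts
// below take it around their bookkeeping but release it across down-chain
// calls, so the driver is never invoked with the lock held.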
//
// Forward declarations
//

static void create_physical_device(VkInstance dispatchable_object, VkPhysicalDevice vkObj, VkDebugReportObjectTypeEXT objType);
static void create_instance(VkInstance dispatchable_object, VkInstance object, VkDebugReportObjectTypeEXT objType);
static void create_device(VkDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType);
static void create_device(VkPhysicalDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType);
static void create_queue(VkDevice dispatchable_object, VkQueue vkObj, VkDebugReportObjectTypeEXT objType);
static VkBool32 validate_image(VkQueue dispatchable_object, VkImage object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
static VkBool32 validate_instance(VkInstance dispatchable_object, VkInstance object, VkDebugReportObjectTypeEXT objType,
                                  bool null_allowed);
static VkBool32 validate_device(VkDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType,
                                bool null_allowed);
static VkBool32 validate_descriptor_pool(VkDevice dispatchable_object, VkDescriptorPool object, VkDebugReportObjectTypeEXT objType,
                                         bool null_allowed);
static VkBool32 validate_descriptor_set_layout(VkDevice dispatchable_object, VkDescriptorSetLayout object,
                                               VkDebugReportObjectTypeEXT objType, bool null_allowed);
static VkBool32 validate_command_pool(VkDevice dispatchable_object, VkCommandPool object, VkDebugReportObjectTypeEXT objType,
                                      bool null_allowed);
static VkBool32 validate_buffer(VkQueue dispatchable_object, VkBuffer object, VkDebugReportObjectTypeEXT objType,
                                bool null_allowed);
static void create_pipeline(VkDevice dispatchable_object, VkPipeline vkObj, VkDebugReportObjectTypeEXT objType);
static VkBool32 validate_pipeline_cache(VkDevice dispatchable_object, VkPipelineCache object, VkDebugReportObjectTypeEXT objType,
                                        bool null_allowed);
static VkBool32 validate_render_pass(VkDevice dispatchable_object, VkRenderPass object, VkDebugReportObjectTypeEXT objType,
                                     bool null_allowed);
static VkBool32 validate_shader_module(VkDevice dispatchable_object, VkShaderModule object, VkDebugReportObjectTypeEXT objType,
                                       bool null_allowed);
static VkBool32 validate_pipeline_layout(VkDevice dispatchable_object, VkPipelineLayout object, VkDebugReportObjectTypeEXT objType,
                                         bool null_allowed);
static VkBool32 validate_pipeline(VkDevice dispatchable_object, VkPipeline object, VkDebugReportObjectTypeEXT objType,
                                  bool null_allowed);
static void destroy_command_pool(VkDevice dispatchable_object, VkCommandPool object);
static void destroy_command_buffer(VkCommandBuffer dispatchable_object, VkCommandBuffer object);
static void destroy_descriptor_pool(VkDevice dispatchable_object, VkDescriptorPool object);
static void destroy_descriptor_set(VkDevice dispatchable_object, VkDescriptorSet object);
static void destroy_device_memory(VkDevice dispatchable_object, VkDeviceMemory object);
static void destroy_swapchain_khr(VkDevice dispatchable_object, VkSwapchainKHR object);
static VkBool32 set_device_memory_status(VkDevice dispatchable_object, VkDeviceMemory object, VkDebugReportObjectTypeEXT objType,
                                         ObjectStatusFlags status_flag);
static VkBool32 reset_device_memory_status(VkDevice dispatchable_object, VkDeviceMemory object, VkDebugReportObjectTypeEXT objType,
                                           ObjectStatusFlags status_flag);
#if 0
static VkBool32 validate_status(VkDevice dispatchable_object, VkFence object, VkDebugReportObjectTypeEXT objType,
                                ObjectStatusFlags status_mask, ObjectStatusFlags status_flag, VkFlags msg_flags,
                                OBJECT_TRACK_ERROR error_code, const char *fail_msg);
#endif
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkPhysicalDeviceMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkDeviceMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkImageMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkQueueMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkDescriptorSetMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkBufferMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkFenceMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkSemaphoreMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkCommandPoolMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkCommandBufferMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkSwapchainKHRMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkSurfaceKHRMap;

static void create_physical_device(VkInstance dispatchable_object, VkPhysicalDevice vkObj, VkDebugReportObjectTypeEXT objType) {
    log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__,
            OBJTRACK_NONE, "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
            string_VkDebugReportObjectTypeEXT(objType), reinterpret_cast<uint64_t>(vkObj));

    OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->objType = objType;
    pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
    pNewObjNode->status = OBJSTATUS_NONE;
    pNewObjNode->vkObj = reinterpret_cast<uint64_t>(vkObj);
    VkPhysicalDeviceMap[reinterpret_cast<uint64_t>(vkObj)] = pNewObjNode;
    uint32_t objIndex = objTypeToIndex(objType);
    numObjs[objIndex]++;
    numTotalObjs++;
}
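// create_physical_device is representative of every create_* helper in this
// layer: log the creation, allocate an OBJTRACK_NODE, file it in the matching
// per-type map keyed by the 64-bit handle value, and bump both the per-type
// and total counters. The destroy_*/free_* helpers undo those steps
// symmetrically and complain if the handle was never tracked.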
static void create_surface_khr(VkInstance dispatchable_object, VkSurfaceKHR vkObj, VkDebugReportObjectTypeEXT objType) {
    // TODO: Add tracking of surface objects
    log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE,
            "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
            string_VkDebugReportObjectTypeEXT(objType), (uint64_t)(vkObj));

    OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->objType = objType;
    pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
    pNewObjNode->status = OBJSTATUS_NONE;
    pNewObjNode->vkObj = (uint64_t)(vkObj);
    VkSurfaceKHRMap[(uint64_t)vkObj] = pNewObjNode;
    uint32_t objIndex = objTypeToIndex(objType);
    numObjs[objIndex]++;
    numTotalObjs++;
}

static void destroy_surface_khr(VkInstance dispatchable_object, VkSurfaceKHR object) {
    uint64_t object_handle = (uint64_t)(object);
    if (VkSurfaceKHRMap.find(object_handle) != VkSurfaceKHRMap.end()) {
        OBJTRACK_NODE *pNode = VkSurfaceKHRMap[(uint64_t)object];
        uint32_t objIndex = objTypeToIndex(pNode->objType);
        assert(numTotalObjs > 0);
        numTotalObjs--;
        assert(numObjs[objIndex] > 0);
        numObjs[objIndex]--;
        log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType, object_handle, __LINE__,
                OBJTRACK_NONE, "OBJTRACK",
                "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
                string_VkDebugReportObjectTypeEXT(pNode->objType), (uint64_t)(object), numTotalObjs, numObjs[objIndex],
                string_VkDebugReportObjectTypeEXT(pNode->objType));
        delete pNode;
        VkSurfaceKHRMap.erase(object_handle);
    } else {
        log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, object_handle, __LINE__,
                OBJTRACK_NONE, "OBJTRACK",
                "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?", object_handle);
    }
}

static void alloc_command_buffer(VkDevice device, VkCommandPool commandPool, VkCommandBuffer vkObj,
                                 VkDebugReportObjectTypeEXT objType, VkCommandBufferLevel level) {
    log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__, OBJTRACK_NONE,
            "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
            string_VkDebugReportObjectTypeEXT(objType), reinterpret_cast<uint64_t>(vkObj));

    OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->objType = objType;
    pNewObjNode->belongsTo = (uint64_t)device;
    pNewObjNode->vkObj = reinterpret_cast<uint64_t>(vkObj);
    pNewObjNode->parentObj = (uint64_t)commandPool;
    if (level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
        pNewObjNode->status = OBJSTATUS_COMMAND_BUFFER_SECONDARY;
    } else {
        pNewObjNode->status = OBJSTATUS_NONE;
    }
    VkCommandBufferMap[reinterpret_cast<uint64_t>(vkObj)] = pNewObjNode;
    uint32_t objIndex = objTypeToIndex(objType);
    numObjs[objIndex]++;
    numTotalObjs++;
}
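// parentObj records which pool allocated the command buffer, so that
// free_command_buffer below can report OBJTRACK_COMMAND_POOL_MISMATCH when a
// buffer is freed against a different pool than the one it came from.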
PRIxLEAST64 ").", 494 reinterpret_cast<uint64_t>(commandBuffer), pNode->parentObj, (uint64_t)(commandPool)); 495 } else { 496 497 uint32_t objIndex = objTypeToIndex(pNode->objType); 498 assert(numTotalObjs > 0); 499 numTotalObjs--; 500 assert(numObjs[objIndex] > 0); 501 numObjs[objIndex]--; 502 log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType, object_handle, __LINE__, OBJTRACK_NONE, 503 "OBJTRACK", "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).", 504 string_VkDebugReportObjectTypeEXT(pNode->objType), reinterpret_cast<uint64_t>(commandBuffer), numTotalObjs, 505 numObjs[objIndex], string_VkDebugReportObjectTypeEXT(pNode->objType)); 506 delete pNode; 507 VkCommandBufferMap.erase(object_handle); 508 } 509 } else { 510 log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, object_handle, __LINE__, OBJTRACK_NONE, 511 "OBJTRACK", "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?", 512 object_handle); 513 } 514 } 515 516 static void alloc_descriptor_set(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSet vkObj, 517 VkDebugReportObjectTypeEXT objType) { 518 log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE, "OBJTRACK", 519 "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++, string_VkDebugReportObjectTypeEXT(objType), 520 (uint64_t)(vkObj)); 521 522 OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE; 523 pNewObjNode->objType = objType; 524 pNewObjNode->belongsTo = (uint64_t)device; 525 pNewObjNode->status = OBJSTATUS_NONE; 526 pNewObjNode->vkObj = (uint64_t)(vkObj); 527 pNewObjNode->parentObj = (uint64_t)descriptorPool; 528 VkDescriptorSetMap[(uint64_t)vkObj] = pNewObjNode; 529 uint32_t objIndex = objTypeToIndex(objType); 530 numObjs[objIndex]++; 531 numTotalObjs++; 532 } 533 534 static void free_descriptor_set(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSet descriptorSet) { 535 uint64_t object_handle = (uint64_t)(descriptorSet); 536 if (VkDescriptorSetMap.find(object_handle) != VkDescriptorSetMap.end()) { 537 OBJTRACK_NODE *pNode = VkDescriptorSetMap[(uint64_t)descriptorSet]; 538 539 if (pNode->parentObj != (uint64_t)(descriptorPool)) { 540 log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, pNode->objType, object_handle, __LINE__, 541 OBJTRACK_DESCRIPTOR_POOL_MISMATCH, "OBJTRACK", 542 "FreeDescriptorSets is attempting to free descriptorSet 0x%" PRIxLEAST64 543 " belonging to Descriptor Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ").", 544 (uint64_t)(descriptorSet), pNode->parentObj, (uint64_t)(descriptorPool)); 545 } else { 546 uint32_t objIndex = objTypeToIndex(pNode->objType); 547 assert(numTotalObjs > 0); 548 numTotalObjs--; 549 assert(numObjs[objIndex] > 0); 550 numObjs[objIndex]--; 551 log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType, object_handle, __LINE__, OBJTRACK_NONE, 552 "OBJTRACK", "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).", 553 string_VkDebugReportObjectTypeEXT(pNode->objType), (uint64_t)(descriptorSet), numTotalObjs, numObjs[objIndex], 554 string_VkDebugReportObjectTypeEXT(pNode->objType)); 555 delete pNode; 556 VkDescriptorSetMap.erase(object_handle); 557 } 558 } else { 559 log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, object_handle, __LINE__, OBJTRACK_NONE, 560 "OBJTRACK", "Unable to remove obj 0x%" PRIxLEAST64 ". 
static void free_descriptor_set(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSet descriptorSet) {
    uint64_t object_handle = (uint64_t)(descriptorSet);
    if (VkDescriptorSetMap.find(object_handle) != VkDescriptorSetMap.end()) {
        OBJTRACK_NODE *pNode = VkDescriptorSetMap[(uint64_t)descriptorSet];

        if (pNode->parentObj != (uint64_t)(descriptorPool)) {
            log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, pNode->objType, object_handle, __LINE__,
                    OBJTRACK_DESCRIPTOR_POOL_MISMATCH, "OBJTRACK",
                    "FreeDescriptorSets is attempting to free descriptorSet 0x%" PRIxLEAST64
                    " belonging to Descriptor Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ".",
                    (uint64_t)(descriptorSet), pNode->parentObj, (uint64_t)(descriptorPool));
        } else {
            uint32_t objIndex = objTypeToIndex(pNode->objType);
            assert(numTotalObjs > 0);
            numTotalObjs--;
            assert(numObjs[objIndex] > 0);
            numObjs[objIndex]--;
            log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType, object_handle, __LINE__, OBJTRACK_NONE,
                    "OBJTRACK", "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
                    string_VkDebugReportObjectTypeEXT(pNode->objType), (uint64_t)(descriptorSet), numTotalObjs, numObjs[objIndex],
                    string_VkDebugReportObjectTypeEXT(pNode->objType));
            delete pNode;
            VkDescriptorSetMap.erase(object_handle);
        }
    } else {
        log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, object_handle, __LINE__, OBJTRACK_NONE,
                "OBJTRACK", "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?",
                object_handle);
    }
}

static void create_queue(VkDevice dispatchable_object, VkQueue vkObj, VkDebugReportObjectTypeEXT objType) {
    log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__,
            OBJTRACK_NONE, "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
            string_VkDebugReportObjectTypeEXT(objType), reinterpret_cast<uint64_t>(vkObj));

    OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->objType = objType;
    pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
    pNewObjNode->status = OBJSTATUS_NONE;
    pNewObjNode->vkObj = reinterpret_cast<uint64_t>(vkObj);
    VkQueueMap[reinterpret_cast<uint64_t>(vkObj)] = pNewObjNode;
    uint32_t objIndex = objTypeToIndex(objType);
    numObjs[objIndex]++;
    numTotalObjs++;
}

static void create_swapchain_image_obj(VkDevice dispatchable_object, VkImage vkObj, VkSwapchainKHR swapchain) {
    log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, (uint64_t)vkObj,
            __LINE__, OBJTRACK_NONE, "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
            "SwapchainImage", (uint64_t)(vkObj));

    OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
    pNewObjNode->objType = VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT;
    pNewObjNode->status = OBJSTATUS_NONE;
    pNewObjNode->vkObj = (uint64_t)vkObj;
    pNewObjNode->parentObj = (uint64_t)swapchain;
    swapchainImageMap[(uint64_t)(vkObj)] = pNewObjNode;
}

static void create_device(VkInstance dispatchable_object, VkDevice vkObj, VkDebugReportObjectTypeEXT objType) {
    log_msg(mid(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE,
            "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
            string_VkDebugReportObjectTypeEXT(objType), (uint64_t)(vkObj));

    OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
    pNewObjNode->objType = objType;
    pNewObjNode->status = OBJSTATUS_NONE;
    pNewObjNode->vkObj = (uint64_t)(vkObj);
    VkDeviceMap[(uint64_t)vkObj] = pNewObjNode;
    uint32_t objIndex = objTypeToIndex(objType);
    numObjs[objIndex]++;
    numTotalObjs++;
}
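// Note that create_device above reports through mid() -- the owning instance's
// debug report data -- and records the instance in belongsTo, presumably so
// instance-scope bookkeeping (e.g., leak checks at instance teardown) can find
// the devices it still owns.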
//
// Non-auto-generated API functions called by generated code
//
VkResult explicit_CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                                 VkInstance *pInstance) {
    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS) {
        return result;
    }

    layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
    initInstanceTable(*pInstance, fpGetInstanceProcAddr, object_tracker_instance_table_map);
    VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(object_tracker_instance_table_map, *pInstance);

    my_data->report_data = debug_report_create_instance(pInstanceTable, *pInstance, pCreateInfo->enabledExtensionCount,
                                                        pCreateInfo->ppEnabledExtensionNames);

    init_object_tracker(my_data, pAllocator);
    createInstanceRegisterExtensions(pCreateInfo, *pInstance);

    create_instance(*pInstance, *pInstance, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT);

    return result;
}

void explicit_GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice gpu, uint32_t *pCount, VkQueueFamilyProperties *pProperties) {
    get_dispatch_table(object_tracker_instance_table_map, gpu)->GetPhysicalDeviceQueueFamilyProperties(gpu, pCount, pProperties);

    loader_platform_thread_lock_mutex(&objLock);
    if (pProperties != NULL)
        setGpuQueueInfoState(*pCount, pProperties);
    loader_platform_thread_unlock_mutex(&objLock);
}

VkResult explicit_CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                               VkDevice *pDevice) {
    loader_platform_thread_lock_mutex(&objLock);
    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        loader_platform_thread_unlock_mutex(&objLock);
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        loader_platform_thread_unlock_mutex(&objLock);
        return result;
    }

    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);

    initDeviceTable(*pDevice, fpGetDeviceProcAddr, object_tracker_device_table_map);

    createDeviceRegisterExtensions(pCreateInfo, *pDevice);

    if (VkPhysicalDeviceMap.find((uint64_t)gpu) != VkPhysicalDeviceMap.end()) {
        OBJTRACK_NODE *pNewObjNode = VkPhysicalDeviceMap[(uint64_t)gpu];
        create_device((VkInstance)pNewObjNode->belongsTo, *pDevice, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT);
    }

    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}
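// Both Create* intercepts above follow the standard layer bootstrap: pull the
// next link's GetInstanceProcAddr/GetDeviceProcAddr out of the
// VK_LAYER_LINK_INFO chain, advance the chain for the layer below, call down,
// and only then initialize this layer's dispatch table for the new handle.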
VkResult explicit_EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
                                           VkPhysicalDevice *pPhysicalDevices) {
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&objLock);
    skipCall |= validate_instance(instance, instance, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = get_dispatch_table(object_tracker_instance_table_map, instance)
                          ->EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
    loader_platform_thread_lock_mutex(&objLock);
    if (result == VK_SUCCESS) {
        if (pPhysicalDevices) {
            for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
                create_physical_device(instance, pPhysicalDevices[i], VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT);
            }
        }
    }
    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}

void explicit_GetDeviceQueue(VkDevice device, uint32_t queueNodeIndex, uint32_t queueIndex, VkQueue *pQueue) {
    loader_platform_thread_lock_mutex(&objLock);
    validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);

    get_dispatch_table(object_tracker_device_table_map, device)->GetDeviceQueue(device, queueNodeIndex, queueIndex, pQueue);

    loader_platform_thread_lock_mutex(&objLock);
    addQueueInfo(queueNodeIndex, *pQueue);
    create_queue(device, *pQueue, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT);
    loader_platform_thread_unlock_mutex(&objLock);
}

VkResult explicit_MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
                            void **ppData) {
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&objLock);
    skipCall |= set_device_memory_status(device, mem, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, OBJSTATUS_GPU_MEM_MAPPED);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);
    if (skipCall == VK_TRUE)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result =
        get_dispatch_table(object_tracker_device_table_map, device)->MapMemory(device, mem, offset, size, flags, ppData);

    return result;
}

void explicit_UnmapMemory(VkDevice device, VkDeviceMemory mem) {
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&objLock);
    skipCall |= reset_device_memory_status(device, mem, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, OBJSTATUS_GPU_MEM_MAPPED);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);
    if (skipCall == VK_TRUE)
        return;

    get_dispatch_table(object_tracker_device_table_map, device)->UnmapMemory(device, mem);
}

VkResult explicit_QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
    loader_platform_thread_lock_mutex(&objLock);
    validateQueueFlags(queue, "QueueBindSparse");

    for (uint32_t i = 0; i < bindInfoCount; i++) {
        for (uint32_t j = 0; j < pBindInfo[i].bufferBindCount; j++)
            validate_buffer(queue, pBindInfo[i].pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, false);
        for (uint32_t j = 0; j < pBindInfo[i].imageOpaqueBindCount; j++)
            validate_image(queue, pBindInfo[i].pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, false);
        for (uint32_t j = 0; j < pBindInfo[i].imageBindCount; j++)
            validate_image(queue, pBindInfo[i].pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, false);
    }

    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result =
        get_dispatch_table(object_tracker_device_table_map, queue)->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
    return result;
}
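// As throughout this file, validation runs under objLock but the lock is
// dropped before calling down the chain, then re-acquired for any post-call
// bookkeeping; only the layer's own state needs serializing.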
VkResult explicit_AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pAllocateInfo,
                                         VkCommandBuffer *pCommandBuffers) {
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&objLock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    skipCall |= validate_command_pool(device, pAllocateInfo->commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);

    if (skipCall) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result =
        get_dispatch_table(object_tracker_device_table_map, device)->AllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers);

    // Only track the new command buffers if the allocation actually succeeded;
    // on failure the contents of pCommandBuffers are undefined.
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&objLock);
        for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; i++) {
            alloc_command_buffer(device, pAllocateInfo->commandPool, pCommandBuffers[i],
                                 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, pAllocateInfo->level);
        }
        loader_platform_thread_unlock_mutex(&objLock);
    }

    return result;
}

VkResult explicit_AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                         VkDescriptorSet *pDescriptorSets) {
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&objLock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    skipCall |=
        validate_descriptor_pool(device, pAllocateInfo->descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
    for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
        skipCall |= validate_descriptor_set_layout(device, pAllocateInfo->pSetLayouts[i],
                                                   VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, false);
    }
    loader_platform_thread_unlock_mutex(&objLock);
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result =
        get_dispatch_table(object_tracker_device_table_map, device)->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);

    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&objLock);
        for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
            alloc_descriptor_set(device, pAllocateInfo->descriptorPool, pDescriptorSets[i],
                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT);
        }
        loader_platform_thread_unlock_mutex(&objLock);
    }

    return result;
}

void explicit_FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
                                 const VkCommandBuffer *pCommandBuffers) {
    loader_platform_thread_lock_mutex(&objLock);
    validate_command_pool(device, commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
    validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);

    get_dispatch_table(object_tracker_device_table_map, device)
        ->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);

    loader_platform_thread_lock_mutex(&objLock);
    for (uint32_t i = 0; i < commandBufferCount; i++) {
        free_command_buffer(device, commandPool, pCommandBuffers[i]);
    }
    loader_platform_thread_unlock_mutex(&objLock);
}
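// The destroy paths below erase from an unordered_map while walking it; the
// post-increment idiom keeps iteration valid across the erase. Pattern sketch
// (owned_by_pool is a stand-in predicate, not a function in this layer):
#if 0
while (itr != map.end()) {
    if (owned_by_pool(itr->second))
        map.erase(itr++); // pass the old position to erase, advance first
    else
        ++itr;
}
#endif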
void explicit_DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
    loader_platform_thread_lock_mutex(&objLock);
    // A swapchain's images are implicitly deleted when the swapchain is deleted.
    // Remove this swapchain's images from our map of such images.
    unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = swapchainImageMap.begin();
    while (itr != swapchainImageMap.end()) {
        OBJTRACK_NODE *pNode = (*itr).second;
        if (pNode->parentObj == (uint64_t)(swapchain)) {
            // Free the tracking node before dropping the map entry so it is not leaked.
            delete pNode;
            swapchainImageMap.erase(itr++);
        } else {
            ++itr;
        }
    }
    destroy_swapchain_khr(device, swapchain);
    loader_platform_thread_unlock_mutex(&objLock);

    get_dispatch_table(object_tracker_device_table_map, device)->DestroySwapchainKHR(device, swapchain, pAllocator);
}

void explicit_FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
    loader_platform_thread_lock_mutex(&objLock);
    validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);

    get_dispatch_table(object_tracker_device_table_map, device)->FreeMemory(device, mem, pAllocator);

    loader_platform_thread_lock_mutex(&objLock);
    destroy_device_memory(device, mem);
    loader_platform_thread_unlock_mutex(&objLock);
}

VkResult explicit_FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
                                     const VkDescriptorSet *pDescriptorSets) {
    loader_platform_thread_lock_mutex(&objLock);
    validate_descriptor_pool(device, descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
    validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);
    VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
                          ->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);

    loader_platform_thread_lock_mutex(&objLock);
    for (uint32_t i = 0; i < count; i++) {
        free_descriptor_set(device, descriptorPool, pDescriptorSets[i]);
    }
    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}
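// DestroyDescriptorPool and DestroyCommandPool below mirror
// DestroySwapchainKHR above: objects owned by the pool are destroyed
// implicitly by the driver, so the layer first sweeps its maps for children
// whose parentObj matches the pool, then forgets the pool itself.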
void explicit_DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&objLock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    skipCall |= validate_descriptor_pool(device, descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);
    if (skipCall) {
        return;
    }
    // A DescriptorPool's descriptor sets are implicitly deleted when the pool is deleted.
    // Remove this pool's descriptor sets from our descriptorSet map.
    loader_platform_thread_lock_mutex(&objLock);
    unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = VkDescriptorSetMap.begin();
    while (itr != VkDescriptorSetMap.end()) {
        OBJTRACK_NODE *pNode = (*itr).second;
        auto del_itr = itr++;
        if (pNode->parentObj == (uint64_t)(descriptorPool)) {
            destroy_descriptor_set(device, (VkDescriptorSet)((*del_itr).first));
        }
    }
    destroy_descriptor_pool(device, descriptorPool);
    loader_platform_thread_unlock_mutex(&objLock);
    get_dispatch_table(object_tracker_device_table_map, device)->DestroyDescriptorPool(device, descriptorPool, pAllocator);
}

void explicit_DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&objLock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    skipCall |= validate_command_pool(device, commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);
    if (skipCall) {
        return;
    }
    loader_platform_thread_lock_mutex(&objLock);
    // A CommandPool's command buffers are implicitly deleted when the pool is deleted.
    // Remove this pool's cmdBuffers from our cmd buffer map.
    unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = VkCommandBufferMap.begin();
    unordered_map<uint64_t, OBJTRACK_NODE *>::iterator del_itr;
    while (itr != VkCommandBufferMap.end()) {
        OBJTRACK_NODE *pNode = (*itr).second;
        del_itr = itr++;
        if (pNode->parentObj == (uint64_t)(commandPool)) {
            destroy_command_buffer(reinterpret_cast<VkCommandBuffer>((*del_itr).first),
                                   reinterpret_cast<VkCommandBuffer>((*del_itr).first));
        }
    }
    destroy_command_pool(device, commandPool);
    loader_platform_thread_unlock_mutex(&objLock);
    get_dispatch_table(object_tracker_device_table_map, device)->DestroyCommandPool(device, commandPool, pAllocator);
}

VkResult explicit_GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&objLock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
                          ->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);

    if (pSwapchainImages != NULL) {
        loader_platform_thread_lock_mutex(&objLock);
        for (uint32_t i = 0; i < *pCount; i++) {
            create_swapchain_image_obj(device, pSwapchainImages[i], swapchain);
        }
        loader_platform_thread_unlock_mutex(&objLock);
    }
    return result;
}
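// Swapchain images are tracked in swapchainImageMap rather than VkImageMap so
// image-usage checks can tell them apart from application-created images (see
// the comment at the map's definition near the top of this file).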
// TODO: Add special case to codegen to cover validating all the pipelines instead of just the first
VkResult explicit_CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
                                          const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                                          VkPipeline *pPipelines) {
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&objLock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    if (pCreateInfos) {
        for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
            if (pCreateInfos[idx0].basePipelineHandle) {
                skipCall |= validate_pipeline(device, pCreateInfos[idx0].basePipelineHandle,
                                              VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, true);
            }
            if (pCreateInfos[idx0].layout) {
                skipCall |= validate_pipeline_layout(device, pCreateInfos[idx0].layout,
                                                     VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, false);
            }
            if (pCreateInfos[idx0].pStages) {
                for (uint32_t idx1 = 0; idx1 < pCreateInfos[idx0].stageCount; ++idx1) {
                    if (pCreateInfos[idx0].pStages[idx1].module) {
                        skipCall |= validate_shader_module(device, pCreateInfos[idx0].pStages[idx1].module,
                                                           VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, false);
                    }
                }
            }
            if (pCreateInfos[idx0].renderPass) {
                skipCall |=
                    validate_render_pass(device, pCreateInfos[idx0].renderPass, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, false);
            }
        }
    }
    if (pipelineCache) {
        skipCall |= validate_pipeline_cache(device, pipelineCache, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT, false);
    }
    loader_platform_thread_unlock_mutex(&objLock);
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
                          ->CreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
    loader_platform_thread_lock_mutex(&objLock);
    if (result == VK_SUCCESS) {
        for (uint32_t idx2 = 0; idx2 < createInfoCount; ++idx2) {
            create_pipeline(device, pPipelines[idx2], VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT);
        }
    }
    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}
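// In the pipeline-creation intercepts, basePipelineHandle is validated with
// null_allowed = true because it is an optional handle; layout, renderPass,
// and the shader modules are required, so they use null_allowed = false.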
// TODO: Add special case to codegen to cover validating all the pipelines instead of just the first
VkResult explicit_CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
                                         const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                                         VkPipeline *pPipelines) {
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&objLock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    if (pCreateInfos) {
        for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
            if (pCreateInfos[idx0].basePipelineHandle) {
                skipCall |= validate_pipeline(device, pCreateInfos[idx0].basePipelineHandle,
                                              VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, true);
            }
            if (pCreateInfos[idx0].layout) {
                skipCall |= validate_pipeline_layout(device, pCreateInfos[idx0].layout,
                                                     VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, false);
            }
            if (pCreateInfos[idx0].stage.module) {
                skipCall |= validate_shader_module(device, pCreateInfos[idx0].stage.module,
                                                   VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, false);
            }
        }
    }
    if (pipelineCache) {
        skipCall |= validate_pipeline_cache(device, pipelineCache, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT, false);
    }
    loader_platform_thread_unlock_mutex(&objLock);
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
                          ->CreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
    loader_platform_thread_lock_mutex(&objLock);
    if (result == VK_SUCCESS) {
        for (uint32_t idx1 = 0; idx1 < createInfoCount; ++idx1) {
            create_pipeline(device, pPipelines[idx1], VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT);
        }
    }
    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}