/*
 * Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (c) 2015-2016 Google, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 */

#define NOMINMAX

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unordered_map>
#include <vector>
#include <list>
#include <memory>
#include <algorithm>

// For Windows, this #include must come before other Vk headers.
#include "vk_loader_platform.h"

#include "unique_objects.h"
#include "vk_dispatch_table_helper.h"
#include "vk_layer_config.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_logging.h"
#include "vk_layer_table.h"
#include "vk_layer_utils.h"
#include "vk_enum_string_helper.h"
#include "vk_validation_error_messages.h"
#include "vk_object_types.h"
#include "vk_extension_helper.h"
#include "vulkan/vk_layer.h"

// This intentionally includes a cpp file
#include "vk_safe_struct.cpp"

#include "unique_objects_wrappers.h"

namespace unique_objects {

static uint32_t loader_layer_if_version = CURRENT_LOADER_LAYER_INTERFACE_VERSION;

static void initUniqueObjects(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "google_unique_objects");
}

// Check enabled instance extensions against the supported instance extension whitelist
static void InstanceExtensionWhitelist(const VkInstanceCreateInfo *pCreateInfo, VkInstance instance) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);

    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        // Check for recognized instance extensions
        if (!white_list(pCreateInfo->ppEnabledExtensionNames[i], kInstanceExtensionNames)) {
            log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                    __LINE__, VALIDATION_ERROR_UNDEFINED, "UniqueObjects",
                    "Instance Extension %s is not supported by this layer. Using this extension may adversely affect validation "
                    "results and/or produce undefined behavior.",
                    pCreateInfo->ppEnabledExtensionNames[i]);
        }
    }
}

// Check enabled device extensions against the supported device extension whitelist
static void DeviceExtensionWhitelist(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        // Check for recognized device extensions
        if (!white_list(pCreateInfo->ppEnabledExtensionNames[i], kDeviceExtensionNames)) {
            log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                    VALIDATION_ERROR_UNDEFINED, "UniqueObjects",
                    "Device Extension %s is not supported by this layer. Using this extension may adversely affect validation "
                    "results and/or produce undefined behavior.",
                    pCreateInfo->ppEnabledExtensionNames[i]);
        }
    }
}
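// ---------------------------------------------------------------------------
// How handle wrapping works (overview).
//
// This layer replaces every non-dispatchable handle the driver returns with a
// layer-generated unique 64-bit id, and records the pairing in
// unique_id_mapping. WrapNew() and Unwrap() (code-generated in
// unique_objects_wrappers.h) implement the two directions. Conceptually:
//
//     // Illustrative sketch only: the real helpers are generated and cover
//     // all handle types; global_unique_id here is a stand-in name for the
//     // layer's id counter, not necessarily the actual identifier.
//     VkSampler WrapNew(layer_data *dev_data, VkSampler handle) {
//         uint64_t unique_id = ++global_unique_id;
//         dev_data->unique_id_mapping[unique_id] = reinterpret_cast<uint64_t &>(handle);
//         return reinterpret_cast<VkSampler &>(unique_id);
//     }
//     VkSampler Unwrap(layer_data *dev_data, VkSampler wrapped) {
//         uint64_t driver_handle = dev_data->unique_id_mapping[reinterpret_cast<uint64_t &>(wrapped)];
//         return reinterpret_cast<VkSampler &>(driver_handle);
//     }
//
// Every entry point below therefore has the same shape: unwrap incoming
// handles (under global_lock), call down the dispatch chain, then wrap any
// newly created handles before returning them to the application.
// ---------------------------------------------------------------------------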
VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                                              VkInstance *pInstance) {
    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS) {
        return result;
    }

    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(*pInstance), instance_layer_data_map);
    instance_data->instance = *pInstance;
    layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr);

    instance_data->report_data = debug_report_create_instance(
        &instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);

    // Set up temporary debug callbacks to output messages at CreateInstance-time
    if (!layer_copy_tmp_callbacks(pCreateInfo->pNext, &instance_data->num_tmp_callbacks, &instance_data->tmp_dbg_create_infos,
                                  &instance_data->tmp_callbacks)) {
        if (instance_data->num_tmp_callbacks > 0) {
            if (layer_enable_tmp_callbacks(instance_data->report_data, instance_data->num_tmp_callbacks,
                                           instance_data->tmp_dbg_create_infos, instance_data->tmp_callbacks)) {
                // Failed to enable the temporary callbacks; free them rather than leak
                layer_free_tmp_callbacks(instance_data->tmp_dbg_create_infos, instance_data->tmp_callbacks);
                instance_data->num_tmp_callbacks = 0;
            }
        }
    }

    initUniqueObjects(instance_data, pAllocator);
    InstanceExtensionWhitelist(pCreateInfo, *pInstance);

    // Disable and free tmp callbacks, no longer necessary
    if (instance_data->num_tmp_callbacks > 0) {
        layer_disable_tmp_callbacks(instance_data->report_data, instance_data->num_tmp_callbacks, instance_data->tmp_callbacks);
        layer_free_tmp_callbacks(instance_data->tmp_dbg_create_infos, instance_data->tmp_callbacks);
        instance_data->num_tmp_callbacks = 0;
    }

    return result;
}
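// For reference, an application opts into this layer at instance creation.
// A minimal application-side sketch (not part of this layer):
//
//     const char *layers[] = {"VK_LAYER_GOOGLE_unique_objects"};
//     VkInstanceCreateInfo info = {};
//     info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
//     info.enabledLayerCount = 1;
//     info.ppEnabledLayerNames = layers;
//     vkCreateInstance(&info, nullptr, &instance);
//
// The loader then routes vkCreateInstance through the chain handled above.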
VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
    dispatch_key key = get_dispatch_key(instance);
    instance_layer_data *instance_data = GetLayerDataPtr(key, instance_layer_data_map);
    VkLayerInstanceDispatchTable *disp_table = &instance_data->dispatch_table;
    disp_table->DestroyInstance(instance, pAllocator);

    // Clean up logging callback, if any
    while (instance_data->logging_callback.size() > 0) {
        VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
        layer_destroy_msg_callback(instance_data->report_data, callback, pAllocator);
        instance_data->logging_callback.pop_back();
    }

    layer_debug_report_destroy_instance(instance_data->report_data);
    FreeLayerDataPtr(key, instance_layer_data_map);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
    instance_layer_data *my_instance_data = GetLayerDataPtr(get_dispatch_key(gpu), instance_layer_data_map);
    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        return result;
    }

    layer_data *my_device_data = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);

    // Set up the layer's device dispatch table
    layer_init_device_dispatch_table(*pDevice, &my_device_data->dispatch_table, fpGetDeviceProcAddr);

    DeviceExtensionWhitelist(pCreateInfo, *pDevice);

    // Link back to the instance-level data so objects mapped at instance level (e.g. surfaces) can be unwrapped from device code
    my_device_data->instance_data = my_instance_data;

    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
    dispatch_key key = get_dispatch_key(device);
    layer_data *dev_data = GetLayerDataPtr(key, layer_data_map);

    layer_debug_report_destroy_device(device);
    dev_data->dispatch_table.DestroyDevice(device, pAllocator);

    FreeLayerDataPtr(key, layer_data_map);
}

static const VkLayerProperties globalLayerProps = {"VK_LAYER_GOOGLE_unique_objects",
                                                   VK_LAYER_API_VERSION,  // specVersion
                                                   1,                     // implementationVersion
                                                   "Google Validation Layer"};

// Declare prototype for this function; it is defined after the other Get*ProcAddr entry points below
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName);

VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &globalLayerProps, pCount, pProperties);
}
VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                              VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &globalLayerProps, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
                                                                    VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, globalLayerProps.layerName))
        return util_GetExtensionProperties(0, NULL, pCount, pProperties);

    return VK_ERROR_LAYER_NOT_PRESENT;
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName,
                                                                  uint32_t *pCount, VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, globalLayerProps.layerName))
        return util_GetExtensionProperties(0, nullptr, pCount, pProperties);

    assert(physicalDevice);

    dispatch_key key = get_dispatch_key(physicalDevice);
    instance_layer_data *instance_data = GetLayerDataPtr(key, instance_layer_data_map);
    return instance_data->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName) {
    const auto item = name_to_funcptr_map.find(funcName);
    if (item != name_to_funcptr_map.end()) {
        return reinterpret_cast<PFN_vkVoidFunction>(item->second);
    }

    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    const auto &table = device_data->dispatch_table;
    if (!table.GetDeviceProcAddr) return nullptr;
    return table.GetDeviceProcAddr(device, funcName);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
    const auto item = name_to_funcptr_map.find(funcName);
    if (item != name_to_funcptr_map.end()) {
        return reinterpret_cast<PFN_vkVoidFunction>(item->second);
    }

    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    const auto &table = instance_data->dispatch_table;
    if (!table.GetInstanceProcAddr) return nullptr;
    return table.GetInstanceProcAddr(instance, funcName);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    VkLayerInstanceDispatchTable *disp_table = &instance_data->dispatch_table;
    if (disp_table->GetPhysicalDeviceProcAddr == NULL) {
        return NULL;
    }
    return disp_table->GetPhysicalDeviceProcAddr(instance, funcName);
}
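// ---------------------------------------------------------------------------
// The create/destroy hooks below all follow the same unwrap/dispatch/wrap
// shape: deep-copy the input struct into its code-generated safe_* counterpart,
// swap every wrapped handle it contains for the driver handle (Unwrap), call
// down the chain, and finally wrap any handles written to the output
// parameters (WrapNew) so the application only ever sees unique ids.
// ---------------------------------------------------------------------------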
VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
                                                      const VkComputePipelineCreateInfo *pCreateInfos,
                                                      const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    safe_VkComputePipelineCreateInfo *local_pCreateInfos = NULL;
    if (pCreateInfos) {
        std::lock_guard<std::mutex> lock(global_lock);
        local_pCreateInfos = new safe_VkComputePipelineCreateInfo[createInfoCount];
        for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
            local_pCreateInfos[idx0].initialize(&pCreateInfos[idx0]);
            if (pCreateInfos[idx0].basePipelineHandle) {
                local_pCreateInfos[idx0].basePipelineHandle = Unwrap(device_data, pCreateInfos[idx0].basePipelineHandle);
            }
            if (pCreateInfos[idx0].layout) {
                local_pCreateInfos[idx0].layout = Unwrap(device_data, pCreateInfos[idx0].layout);
            }
            if (pCreateInfos[idx0].stage.module) {
                local_pCreateInfos[idx0].stage.module = Unwrap(device_data, pCreateInfos[idx0].stage.module);
            }
        }
    }
    if (pipelineCache) {
        std::lock_guard<std::mutex> lock(global_lock);
        pipelineCache = Unwrap(device_data, pipelineCache);
    }

    VkResult result = device_data->dispatch_table.CreateComputePipelines(device, pipelineCache, createInfoCount,
                                                                         local_pCreateInfos->ptr(), pAllocator, pPipelines);
    delete[] local_pCreateInfos;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t i = 0; i < createInfoCount; ++i) {
            if (pPipelines[i] != VK_NULL_HANDLE) {
                pPipelines[i] = WrapNew(device_data, pPipelines[i]);
            }
        }
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
                                                       const VkGraphicsPipelineCreateInfo *pCreateInfos,
                                                       const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    safe_VkGraphicsPipelineCreateInfo *local_pCreateInfos = nullptr;
    if (pCreateInfos) {
        local_pCreateInfos = new safe_VkGraphicsPipelineCreateInfo[createInfoCount];
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
            bool uses_color_attachment = false;
            bool uses_depthstencil_attachment = false;
            {
                const auto subpasses_uses_it =
                    device_data->renderpasses_states.find(Unwrap(device_data, pCreateInfos[idx0].renderPass));
                if (subpasses_uses_it != device_data->renderpasses_states.end()) {
                    const auto &subpasses_uses = subpasses_uses_it->second;
                    if (subpasses_uses.subpasses_using_color_attachment.count(pCreateInfos[idx0].subpass))
                        uses_color_attachment = true;
                    if (subpasses_uses.subpasses_using_depthstencil_attachment.count(pCreateInfos[idx0].subpass))
                        uses_depthstencil_attachment = true;
                }
            }

            local_pCreateInfos[idx0].initialize(&pCreateInfos[idx0], uses_color_attachment, uses_depthstencil_attachment);

            if (pCreateInfos[idx0].basePipelineHandle) {
                local_pCreateInfos[idx0].basePipelineHandle = Unwrap(device_data, pCreateInfos[idx0].basePipelineHandle);
            }
            if (pCreateInfos[idx0].layout) {
                local_pCreateInfos[idx0].layout = Unwrap(device_data, pCreateInfos[idx0].layout);
            }
            if (pCreateInfos[idx0].pStages) {
                for (uint32_t idx1 = 0; idx1 < pCreateInfos[idx0].stageCount; ++idx1) {
                    if (pCreateInfos[idx0].pStages[idx1].module) {
                        local_pCreateInfos[idx0].pStages[idx1].module =
                            Unwrap(device_data, pCreateInfos[idx0].pStages[idx1].module);
                    }
                }
            }
            if (pCreateInfos[idx0].renderPass) {
                local_pCreateInfos[idx0].renderPass = Unwrap(device_data, pCreateInfos[idx0].renderPass);
            }
        }
    }
    if (pipelineCache) {
        std::lock_guard<std::mutex> lock(global_lock);
        pipelineCache = Unwrap(device_data, pipelineCache);
    }

    VkResult result = device_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, createInfoCount,
                                                                          local_pCreateInfos->ptr(), pAllocator, pPipelines);
    delete[] local_pCreateInfos;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t i = 0; i < createInfoCount; ++i) {
            if (pPipelines[i] != VK_NULL_HANDLE) {
                pPipelines[i] = WrapNew(device_data, pPipelines[i]);
            }
        }
    }
    return result;
}
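// The render pass state captured below exists to feed the pipeline hook above:
// safe_VkGraphicsPipelineCreateInfo::initialize() takes the two uses_*_attachment
// flags, presumably so that pipeline state which is irrelevant to a subpass
// without the corresponding attachments can be handled safely during the deep copy.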
static void PostCallCreateRenderPass(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, VkRenderPass renderPass) {
    auto &renderpass_state = dev_data->renderpasses_states[renderPass];

    for (uint32_t subpass = 0; subpass < pCreateInfo->subpassCount; ++subpass) {
        bool uses_color = false;
        for (uint32_t i = 0; i < pCreateInfo->pSubpasses[subpass].colorAttachmentCount && !uses_color; ++i)
            if (pCreateInfo->pSubpasses[subpass].pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) uses_color = true;

        bool uses_depthstencil = false;
        if (pCreateInfo->pSubpasses[subpass].pDepthStencilAttachment)
            if (pCreateInfo->pSubpasses[subpass].pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)
                uses_depthstencil = true;

        if (uses_color) renderpass_state.subpasses_using_color_attachment.insert(subpass);
        if (uses_depthstencil) renderpass_state.subpasses_using_depthstencil_attachment.insert(subpass);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);

        // Record subpass attachment usage keyed by the driver-level handle, before wrapping
        PostCallCreateRenderPass(dev_data, pCreateInfo, *pRenderPass);

        *pRenderPass = WrapNew(dev_data, *pRenderPass);
    }
    return result;
}

static void PostCallDestroyRenderPass(layer_data *dev_data, VkRenderPass renderPass) {
    dev_data->renderpasses_states.erase(renderPass);
}

VKAPI_ATTR void VKAPI_CALL DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    uint64_t renderPass_id = reinterpret_cast<uint64_t &>(renderPass);
    renderPass = (VkRenderPass)dev_data->unique_id_mapping[renderPass_id];
    dev_data->unique_id_mapping.erase(renderPass_id);
    lock.unlock();
    dev_data->dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);

    lock.lock();
    PostCallDestroyRenderPass(dev_data, renderPass);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
    layer_data *my_map_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    safe_VkSwapchainCreateInfoKHR *local_pCreateInfo = NULL;
    if (pCreateInfo) {
        std::lock_guard<std::mutex> lock(global_lock);
        local_pCreateInfo = new safe_VkSwapchainCreateInfoKHR(pCreateInfo);
        local_pCreateInfo->oldSwapchain = Unwrap(my_map_data, pCreateInfo->oldSwapchain);
        // Surface is an instance-level object
        local_pCreateInfo->surface = Unwrap(my_map_data->instance_data, pCreateInfo->surface);
    }

    VkResult result = my_map_data->dispatch_table.CreateSwapchainKHR(device, local_pCreateInfo->ptr(), pAllocator, pSwapchain);
    delete local_pCreateInfo;
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        *pSwapchain = WrapNew(my_map_data, *pSwapchain);
    }
    return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
                                                         const VkSwapchainCreateInfoKHR *pCreateInfos,
                                                         const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    safe_VkSwapchainCreateInfoKHR *local_pCreateInfos = NULL;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        if (pCreateInfos) {
            local_pCreateInfos = new safe_VkSwapchainCreateInfoKHR[swapchainCount];
            for (uint32_t i = 0; i < swapchainCount; ++i) {
                local_pCreateInfos[i].initialize(&pCreateInfos[i]);
                if (pCreateInfos[i].surface) {
                    // Surface is an instance-level object
                    local_pCreateInfos[i].surface = Unwrap(dev_data->instance_data, pCreateInfos[i].surface);
                }
                if (pCreateInfos[i].oldSwapchain) {
                    local_pCreateInfos[i].oldSwapchain = Unwrap(dev_data, pCreateInfos[i].oldSwapchain);
                }
            }
        }
    }
    VkResult result = dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, local_pCreateInfos->ptr(),
                                                                         pAllocator, pSwapchains);
    delete[] local_pCreateInfos;
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t i = 0; i < swapchainCount; i++) {
            pSwapchains[i] = WrapNew(dev_data, pSwapchains[i]);
        }
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
                                                     VkImage *pSwapchainImages) {
    layer_data *my_device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (VK_NULL_HANDLE != swapchain) {
        std::lock_guard<std::mutex> lock(global_lock);
        swapchain = Unwrap(my_device_data, swapchain);
    }
    VkResult result =
        my_device_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
    // TODO: Need to add corresponding code to delete these images
    if (VK_SUCCESS == result) {
        if ((*pSwapchainImageCount > 0) && pSwapchainImages) {
            std::lock_guard<std::mutex> lock(global_lock);
            for (uint32_t i = 0; i < *pSwapchainImageCount; ++i) {
                pSwapchainImages[i] = WrapNew(my_device_data, pSwapchainImages[i]);
            }
        }
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    safe_VkPresentInfoKHR *local_pPresentInfo = NULL;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        if (pPresentInfo) {
            local_pPresentInfo = new safe_VkPresentInfoKHR(pPresentInfo);
            if (local_pPresentInfo->pWaitSemaphores) {
                for (uint32_t index1 = 0; index1 < local_pPresentInfo->waitSemaphoreCount; ++index1) {
                    local_pPresentInfo->pWaitSemaphores[index1] = Unwrap(dev_data, pPresentInfo->pWaitSemaphores[index1]);
                }
            }
            if (local_pPresentInfo->pSwapchains) {
                for (uint32_t index1 = 0; index1 < local_pPresentInfo->swapchainCount; ++index1) {
                    local_pPresentInfo->pSwapchains[index1] = Unwrap(dev_data, pPresentInfo->pSwapchains[index1]);
                }
            }
        }
    }
    VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, local_pPresentInfo->ptr());

    // pResults is an output array embedded in a structure. The code generator neglects to copy back from the safe_* version,
    // so handle it as a special case here:
    if (pPresentInfo && pPresentInfo->pResults) {
        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) {
            pPresentInfo->pResults[i] = local_pPresentInfo->pResults[i];
        }
    }

    delete local_pPresentInfo;
    return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorUpdateTemplateKHR(VkDevice device,
                                                                 const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
                                                                 const VkAllocationCallbacks *pAllocator,
                                                                 VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    safe_VkDescriptorUpdateTemplateCreateInfoKHR *local_create_info = NULL;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        if (pCreateInfo) {
            local_create_info = new safe_VkDescriptorUpdateTemplateCreateInfoKHR(pCreateInfo);
            if (pCreateInfo->descriptorSetLayout) {
                local_create_info->descriptorSetLayout = Unwrap(dev_data, pCreateInfo->descriptorSetLayout);
            }
            if (pCreateInfo->pipelineLayout) {
                local_create_info->pipelineLayout = Unwrap(dev_data, pCreateInfo->pipelineLayout);
            }
        }
    }
    VkResult result = dev_data->dispatch_table.CreateDescriptorUpdateTemplateKHR(device, local_create_info->ptr(), pAllocator,
                                                                                 pDescriptorUpdateTemplate);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        *pDescriptorUpdateTemplate = WrapNew(dev_data, *pDescriptorUpdateTemplate);

        // Shadow template createInfo for later updates
        std::unique_ptr<TEMPLATE_STATE> template_state(new TEMPLATE_STATE(*pDescriptorUpdateTemplate, local_create_info));
        dev_data->desc_template_map[(uint64_t)*pDescriptorUpdateTemplate] = std::move(template_state);
    }
    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyDescriptorUpdateTemplateKHR(VkDevice device,
                                                              VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                              const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    uint64_t descriptor_update_template_id = reinterpret_cast<uint64_t &>(descriptorUpdateTemplate);
    dev_data->desc_template_map.erase(descriptor_update_template_id);
    descriptorUpdateTemplate = (VkDescriptorUpdateTemplateKHR)dev_data->unique_id_mapping[descriptor_update_template_id];
    dev_data->unique_id_mapping.erase(descriptor_update_template_id);
    lock.unlock();
    dev_data->dispatch_table.DestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator);
}
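// ---------------------------------------------------------------------------
// Descriptor update template data is a raw byte blob laid out by the
// application according to the shadowed create info: the j-th descriptor of
// entry i lives at
//
//     offset = pDescriptorUpdateEntries[i].offset + j * pDescriptorUpdateEntries[i].stride
//
// For example (illustrative values): an entry with offset 16, stride 24, and
// descriptorCount 2 for a VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER places
// VkDescriptorBufferInfo structs at bytes 16 and 40. The helper below walks
// each entry, unwraps the handles found at those offsets, and rebuilds an
// equivalent buffer holding driver-level handles to pass down the chain.
// ---------------------------------------------------------------------------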
void *BuildUnwrappedUpdateTemplateBuffer(layer_data *dev_data, uint64_t descriptorUpdateTemplate, const void *pData) {
    auto const template_map_entry = dev_data->desc_template_map.find(descriptorUpdateTemplate);
    if (template_map_entry == dev_data->desc_template_map.end()) {
        assert(0);
        return nullptr;  // Defensive: avoid dereferencing end() when asserts are compiled out
    }
    auto const &create_info = template_map_entry->second->create_info;
    size_t allocation_size = 0;
    std::vector<std::tuple<size_t, VulkanObjectType, void *>> template_entries;

    for (uint32_t i = 0; i < create_info.descriptorUpdateEntryCount; i++) {
        for (uint32_t j = 0; j < create_info.pDescriptorUpdateEntries[i].descriptorCount; j++) {
            size_t offset = create_info.pDescriptorUpdateEntries[i].offset + j * create_info.pDescriptorUpdateEntries[i].stride;
            char *update_entry = (char *)(pData) + offset;

            switch (create_info.pDescriptorUpdateEntries[i].descriptorType) {
                case VK_DESCRIPTOR_TYPE_SAMPLER:
                case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
                case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
                case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
                case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: {
                    auto image_entry = reinterpret_cast<VkDescriptorImageInfo *>(update_entry);
                    allocation_size = std::max(allocation_size, offset + sizeof(VkDescriptorImageInfo));

                    VkDescriptorImageInfo *wrapped_entry = new VkDescriptorImageInfo(*image_entry);
                    wrapped_entry->sampler = Unwrap(dev_data, image_entry->sampler);
                    wrapped_entry->imageView = Unwrap(dev_data, image_entry->imageView);
                    template_entries.emplace_back(offset, kVulkanObjectTypeImage, reinterpret_cast<void *>(wrapped_entry));
                } break;

                case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
                case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
                case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
                case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
                    auto buffer_entry = reinterpret_cast<VkDescriptorBufferInfo *>(update_entry);
                    allocation_size = std::max(allocation_size, offset + sizeof(VkDescriptorBufferInfo));

                    VkDescriptorBufferInfo *wrapped_entry = new VkDescriptorBufferInfo(*buffer_entry);
                    wrapped_entry->buffer = Unwrap(dev_data, buffer_entry->buffer);
                    template_entries.emplace_back(offset, kVulkanObjectTypeBuffer, reinterpret_cast<void *>(wrapped_entry));
                } break;

                case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
                case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
                    auto buffer_view_handle = reinterpret_cast<VkBufferView *>(update_entry);
                    allocation_size = std::max(allocation_size, offset + sizeof(VkBufferView));

                    // Non-dispatchable handle: store the unwrapped handle value itself, not a pointer to it
                    VkBufferView wrapped_entry = Unwrap(dev_data, *buffer_view_handle);
                    template_entries.emplace_back(offset, kVulkanObjectTypeBufferView, reinterpret_cast<void *>(wrapped_entry));
                } break;
                default:
                    assert(0);
                    break;
            }
        }
    }
    // Allocate the required buffer size and populate it with the unwrapped data
    void *unwrapped_data = malloc(allocation_size);
    for (auto &this_entry : template_entries) {
        VulkanObjectType type = std::get<1>(this_entry);
        void *destination = (char *)unwrapped_data + std::get<0>(this_entry);
        void *source = (char *)std::get<2>(this_entry);

        switch (type) {
            case kVulkanObjectTypeImage:
                *(reinterpret_cast<VkDescriptorImageInfo *>(destination)) = *(reinterpret_cast<VkDescriptorImageInfo *>(source));
                delete reinterpret_cast<VkDescriptorImageInfo *>(source);
                break;
            case kVulkanObjectTypeBuffer:
                *(reinterpret_cast<VkDescriptorBufferInfo *>(destination)) =
                    *(reinterpret_cast<VkDescriptorBufferInfo *>(source));
                delete reinterpret_cast<VkDescriptorBufferInfo *>(source);
                break;
            case kVulkanObjectTypeBufferView:
                *(reinterpret_cast<VkBufferView *>(destination)) = reinterpret_cast<VkBufferView>(source);
                break;
            default:
                assert(0);
                break;
        }
    }
    return unwrapped_data;
}
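// Note: BuildUnwrappedUpdateTemplateBuffer() returns a malloc()'d buffer; the
// two callers below are responsible for free()'ing it after dispatching the call.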
VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
                                                              VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                              const void *pData) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    uint64_t template_handle = reinterpret_cast<uint64_t &>(descriptorUpdateTemplate);
    void *unwrapped_buffer = nullptr;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        descriptorSet = Unwrap(dev_data, descriptorSet);
        descriptorUpdateTemplate = (VkDescriptorUpdateTemplateKHR)dev_data->unique_id_mapping[template_handle];
        unwrapped_buffer = BuildUnwrappedUpdateTemplateBuffer(dev_data, template_handle, pData);
    }
    dev_data->dispatch_table.UpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate,
                                                                unwrapped_buffer);
    free(unwrapped_buffer);
}

VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
                                                               VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                               VkPipelineLayout layout, uint32_t set, const void *pData) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    uint64_t template_handle = reinterpret_cast<uint64_t &>(descriptorUpdateTemplate);
    void *unwrapped_buffer = nullptr;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        descriptorUpdateTemplate = Unwrap(dev_data, descriptorUpdateTemplate);
        layout = Unwrap(dev_data, layout);
        unwrapped_buffer = BuildUnwrappedUpdateTemplateBuffer(dev_data, template_handle, pData);
    }
    dev_data->dispatch_table.CmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate, layout, set,
                                                                 unwrapped_buffer);
    free(unwrapped_buffer);
}

#ifndef __ANDROID__
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
                                                                     VkDisplayPropertiesKHR *pProperties) {
    instance_layer_data *my_map_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);

    VkResult result =
        my_map_data->dispatch_table.GetPhysicalDeviceDisplayPropertiesKHR(physicalDevice, pPropertyCount, pProperties);
    if ((result == VK_SUCCESS || result == VK_INCOMPLETE) && pProperties) {
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) {
            pProperties[idx0].display = WrapNew(my_map_data, pProperties[idx0].display);
        }
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
                                                                   uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) {
    instance_layer_data *my_map_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    VkResult result =
        my_map_data->dispatch_table.GetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex, pDisplayCount, pDisplays);
    if (VK_SUCCESS == result) {
        if ((*pDisplayCount > 0) && pDisplays) {
            std::lock_guard<std::mutex> lock(global_lock);
            for (uint32_t i = 0; i < *pDisplayCount; i++) {
                // TODO: this looks like it really wants a /reverse/ mapping. What's going on here?
                auto it = my_map_data->unique_id_mapping.find(reinterpret_cast<const uint64_t &>(pDisplays[i]));
                assert(it != my_map_data->unique_id_mapping.end());
                pDisplays[i] = reinterpret_cast<VkDisplayKHR &>(it->second);
            }
        }
    }
    return result;
}
VKAPI_ATTR VkResult VKAPI_CALL GetDisplayModePropertiesKHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display,
                                                           uint32_t *pPropertyCount, VkDisplayModePropertiesKHR *pProperties) {
    instance_layer_data *my_map_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    {
        std::lock_guard<std::mutex> lock(global_lock);
        display = Unwrap(my_map_data, display);
    }

    VkResult result =
        my_map_data->dispatch_table.GetDisplayModePropertiesKHR(physicalDevice, display, pPropertyCount, pProperties);
    if (result == VK_SUCCESS && pProperties) {
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) {
            pProperties[idx0].displayMode = WrapNew(my_map_data, pProperties[idx0].displayMode);
        }
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode,
                                                              uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR *pCapabilities) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    {
        std::lock_guard<std::mutex> lock(global_lock);
        mode = Unwrap(instance_data, mode);
    }
    VkResult result = instance_data->dispatch_table.GetDisplayPlaneCapabilitiesKHR(physicalDevice, mode, planeIndex,
                                                                                   pCapabilities);
    return result;
}
#endif

VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectTagEXT(VkDevice device, const VkDebugMarkerObjectTagInfoEXT *pTagInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    auto local_tag_info = new safe_VkDebugMarkerObjectTagInfoEXT(pTagInfo);
    {
        std::lock_guard<std::mutex> lock(global_lock);
        auto it = device_data->unique_id_mapping.find(reinterpret_cast<uint64_t &>(local_tag_info->object));
        if (it != device_data->unique_id_mapping.end()) {
            local_tag_info->object = it->second;
        }
    }
    VkResult result = device_data->dispatch_table.DebugMarkerSetObjectTagEXT(
        device, reinterpret_cast<VkDebugMarkerObjectTagInfoEXT *>(local_tag_info));
    delete local_tag_info;  // The safe_* copy is only needed for the call down the chain
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectNameEXT(VkDevice device, const VkDebugMarkerObjectNameInfoEXT *pNameInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    auto local_name_info = new safe_VkDebugMarkerObjectNameInfoEXT(pNameInfo);
    {
        std::lock_guard<std::mutex> lock(global_lock);
        auto it = device_data->unique_id_mapping.find(reinterpret_cast<uint64_t &>(local_name_info->object));
        if (it != device_data->unique_id_mapping.end()) {
            local_name_info->object = it->second;
        }
    }
    VkResult result = device_data->dispatch_table.DebugMarkerSetObjectNameEXT(
        device, reinterpret_cast<VkDebugMarkerObjectNameInfoEXT *>(local_name_info));
    delete local_name_info;  // The safe_* copy is only needed for the call down the chain
    return result;
}

}  // namespace unique_objects

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
                                                                                      VkExtensionProperties *pProperties) {
    return unique_objects::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount,
                                                                                  VkLayerProperties *pProperties) {
    return unique_objects::EnumerateInstanceLayerProperties(pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                                                VkLayerProperties *pProperties) {
    assert(physicalDevice == VK_NULL_HANDLE);
    return unique_objects::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    return unique_objects::GetDeviceProcAddr(dev, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    return unique_objects::GetInstanceProcAddr(instance, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    assert(physicalDevice == VK_NULL_HANDLE);
    return unique_objects::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_layerGetPhysicalDeviceProcAddr(VkInstance instance,
                                                                                           const char *funcName) {
    return unique_objects::GetPhysicalDeviceProcAddr(instance, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
    assert(pVersionStruct != NULL);
    assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT);

    // Fill in the function pointers if our version is at least capable of having the structure contain them.
    if (pVersionStruct->loaderLayerInterfaceVersion >= 2) {
        pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr;
        pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr;
        pVersionStruct->pfnGetPhysicalDeviceProcAddr = vk_layerGetPhysicalDeviceProcAddr;
    }

    if (pVersionStruct->loaderLayerInterfaceVersion < CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
        unique_objects::loader_layer_if_version = pVersionStruct->loaderLayerInterfaceVersion;
    } else if (pVersionStruct->loaderLayerInterfaceVersion > CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
        pVersionStruct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
    }

    return VK_SUCCESS;
}