1 /* Copyright (c) 2015-2016 The Khronos Group Inc. 2 * Copyright (c) 2015-2016 Valve Corporation 3 * Copyright (c) 2015-2016 LunarG, Inc. 4 * Copyright (C) 2015-2016 Google Inc. 5 * 6 * Licensed under the Apache License, Version 2.0 (the "License"); 7 * you may not use this file except in compliance with the License. 8 * You may obtain a copy of the License at 9 * 10 * http://www.apache.org/licenses/LICENSE-2.0 11 * 12 * Unless required by applicable law or agreed to in writing, software 13 * distributed under the License is distributed on an "AS IS" BASIS, 14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 * See the License for the specific language governing permissions and 16 * limitations under the License. 17 * 18 * Author: Mark Lobodzinski <mark (at) lunarg.com> 19 * Author: Mike Stroyan <mike (at) LunarG.com> 20 * Author: Tobin Ehlis <tobin (at) lunarg.com> 21 */ 22 23 #include <stdio.h> 24 #include <stdlib.h> 25 #include <string.h> 26 #include <unordered_map> 27 #include <memory> 28 29 #include "vk_loader_platform.h" 30 #include "vk_dispatch_table_helper.h" 31 #if defined(__GNUC__) 32 #pragma GCC diagnostic ignored "-Wwrite-strings" 33 #endif 34 #if defined(__GNUC__) 35 #pragma GCC diagnostic warning "-Wwrite-strings" 36 #endif 37 #include "vk_struct_size_helper.h" 38 #include "device_limits.h" 39 #include "vulkan/vk_layer.h" 40 #include "vk_layer_config.h" 41 #include "vk_enum_validate_helper.h" 42 #include "vk_layer_table.h" 43 #include "vk_layer_data.h" 44 #include "vk_layer_logging.h" 45 #include "vk_layer_extension_utils.h" 46 #include "vk_layer_utils.h" 47 48 namespace device_limits { 49 50 // This struct will be stored in a map hashed by the dispatchable object 51 struct layer_data { 52 VkInstance instance; 53 54 debug_report_data *report_data; 55 std::vector<VkDebugReportCallbackEXT> logging_callback; 56 VkLayerDispatchTable *device_dispatch_table; 57 VkLayerInstanceDispatchTable *instance_dispatch_table; 58 // Track 
state of each instance 59 unique_ptr<INSTANCE_STATE> instanceState; 60 unique_ptr<PHYSICAL_DEVICE_STATE> physicalDeviceState; 61 VkPhysicalDeviceFeatures actualPhysicalDeviceFeatures; 62 VkPhysicalDeviceFeatures requestedPhysicalDeviceFeatures; 63 64 // Track physical device per logical device 65 VkPhysicalDevice physicalDevice; 66 VkPhysicalDeviceProperties physicalDeviceProperties; 67 // Vector indices correspond to queueFamilyIndex 68 vector<unique_ptr<VkQueueFamilyProperties>> queueFamilyProperties; 69 70 layer_data() 71 : report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr), instanceState(nullptr), 72 physicalDeviceState(nullptr), actualPhysicalDeviceFeatures(), requestedPhysicalDeviceFeatures(), physicalDevice(){}; 73 }; 74 75 static unordered_map<void *, layer_data *> layer_data_map; 76 77 // TODO : This can be much smarter, using separate locks for separate global data 78 static int globalLockInitialized = 0; 79 static loader_platform_thread_mutex globalLock; 80 81 static void init_device_limits(layer_data *my_data, const VkAllocationCallbacks *pAllocator) { 82 83 layer_debug_actions(my_data->report_data, my_data->logging_callback, pAllocator, "lunarg_device_limits"); 84 85 if (!globalLockInitialized) { 86 // TODO/TBD: Need to delete this mutex sometime. How??? One 87 // suggestion is to call this during vkCreateInstance(), and then we 88 // can clean it up during vkDestroyInstance(). However, that requires 89 // that the layer have per-instance locks. We need to come back and 90 // address this soon. 
91 loader_platform_thread_create_mutex(&globalLock); 92 globalLockInitialized = 1; 93 } 94 } 95 96 static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}}; 97 98 static const VkLayerProperties global_layer = { 99 "VK_LAYER_LUNARG_device_limits", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer", 100 }; 101 102 VKAPI_ATTR VkResult VKAPI_CALL 103 CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) { 104 VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO); 105 106 assert(chain_info->u.pLayerInfo); 107 PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr; 108 PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance"); 109 if (fpCreateInstance == NULL) { 110 return VK_ERROR_INITIALIZATION_FAILED; 111 } 112 113 // Advance the link info for the next element on the chain 114 chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext; 115 116 VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance); 117 if (result != VK_SUCCESS) 118 return result; 119 120 layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map); 121 my_data->instance = *pInstance; 122 my_data->instance_dispatch_table = new VkLayerInstanceDispatchTable; 123 layer_init_instance_dispatch_table(*pInstance, my_data->instance_dispatch_table, fpGetInstanceProcAddr); 124 125 my_data->report_data = debug_report_create_instance(my_data->instance_dispatch_table, *pInstance, 126 pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames); 127 128 init_device_limits(my_data, pAllocator); 129 my_data->instanceState = unique_ptr<INSTANCE_STATE>(new INSTANCE_STATE()); 130 131 return VK_SUCCESS; 132 } 133 134 /* hook DestroyInstance to remove tableInstanceMap entry */ 135 VKAPI_ATTR void 
VKAPI_CALL 136 DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) { 137 dispatch_key key = get_dispatch_key(instance); 138 layer_data *my_data = get_my_data_ptr(key, layer_data_map); 139 VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table; 140 pTable->DestroyInstance(instance, pAllocator); 141 142 // Clean up logging callback, if any 143 while (my_data->logging_callback.size() > 0) { 144 VkDebugReportCallbackEXT callback = my_data->logging_callback.back(); 145 layer_destroy_msg_callback(my_data->report_data, callback, pAllocator); 146 my_data->logging_callback.pop_back(); 147 } 148 149 layer_debug_report_destroy_instance(my_data->report_data); 150 delete my_data->instance_dispatch_table; 151 layer_data_map.erase(key); 152 if (layer_data_map.empty()) { 153 // Release mutex when destroying last instance. 154 loader_platform_thread_delete_mutex(&globalLock); 155 globalLockInitialized = 0; 156 } 157 } 158 159 VKAPI_ATTR VkResult VKAPI_CALL 160 EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount, VkPhysicalDevice *pPhysicalDevices) { 161 bool skipCall = false; 162 layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map); 163 if (my_data->instanceState) { 164 // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS 165 if (NULL == pPhysicalDevices) { 166 my_data->instanceState->vkEnumeratePhysicalDevicesState = QUERY_COUNT; 167 } else { 168 if (UNCALLED == my_data->instanceState->vkEnumeratePhysicalDevicesState) { 169 // Flag error here, shouldn't be calling this without having queried count 170 skipCall |= 171 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, 172 __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL", 173 "Invalid call sequence to vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. 
You should first " 174 "call vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount."); 175 } // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state 176 else if (my_data->instanceState->physicalDevicesCount != *pPhysicalDeviceCount) { 177 // TODO: Having actual count match count from app is not a requirement, so this can be a warning 178 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, 179 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL", 180 "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count " 181 "supported by this instance is %u.", 182 *pPhysicalDeviceCount, my_data->instanceState->physicalDevicesCount); 183 } 184 my_data->instanceState->vkEnumeratePhysicalDevicesState = QUERY_DETAILS; 185 } 186 if (skipCall) 187 return VK_ERROR_VALIDATION_FAILED_EXT; 188 VkResult result = 189 my_data->instance_dispatch_table->EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices); 190 if (NULL == pPhysicalDevices) { 191 my_data->instanceState->physicalDevicesCount = *pPhysicalDeviceCount; 192 } else { // Save physical devices 193 for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) { 194 layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(pPhysicalDevices[i]), layer_data_map); 195 phy_dev_data->physicalDeviceState = unique_ptr<PHYSICAL_DEVICE_STATE>(new PHYSICAL_DEVICE_STATE()); 196 // Init actual features for each physical device 197 my_data->instance_dispatch_table->GetPhysicalDeviceFeatures(pPhysicalDevices[i], 198 &(phy_dev_data->actualPhysicalDeviceFeatures)); 199 } 200 } 201 return result; 202 } else { 203 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, __LINE__, 204 DEVLIMITS_INVALID_INSTANCE, "DL", "Invalid instance (0x%" PRIxLEAST64 ") passed into vkEnumeratePhysicalDevices().", 205 (uint64_t)instance); 206 } 207 
return VK_ERROR_VALIDATION_FAILED_EXT; 208 } 209 210 VKAPI_ATTR void VKAPI_CALL 211 GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures *pFeatures) { 212 layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map); 213 phy_dev_data->physicalDeviceState->vkGetPhysicalDeviceFeaturesState = QUERY_DETAILS; 214 phy_dev_data->instance_dispatch_table->GetPhysicalDeviceFeatures(physicalDevice, pFeatures); 215 } 216 217 VKAPI_ATTR void VKAPI_CALL 218 GetPhysicalDeviceFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties *pFormatProperties) { 219 get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map) 220 ->instance_dispatch_table->GetPhysicalDeviceFormatProperties(physicalDevice, format, pFormatProperties); 221 } 222 223 VKAPI_ATTR VkResult VKAPI_CALL 224 GetPhysicalDeviceImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, 225 VkImageUsageFlags usage, VkImageCreateFlags flags, 226 VkImageFormatProperties *pImageFormatProperties) { 227 return get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map) 228 ->instance_dispatch_table->GetPhysicalDeviceImageFormatProperties(physicalDevice, format, type, tiling, usage, flags, 229 pImageFormatProperties); 230 } 231 232 VKAPI_ATTR void VKAPI_CALL 233 GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties *pProperties) { 234 layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map); 235 phy_dev_data->instance_dispatch_table->GetPhysicalDeviceProperties(physicalDevice, pProperties); 236 } 237 238 VKAPI_ATTR void VKAPI_CALL 239 GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, 240 VkQueueFamilyProperties *pQueueFamilyProperties) { 241 bool skipCall = false; 242 layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map); 243 
if (phy_dev_data->physicalDeviceState) { 244 if (NULL == pQueueFamilyProperties) { 245 phy_dev_data->physicalDeviceState->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT; 246 } else { 247 // Verify that for each physical device, this function is called first with NULL pQueueFamilyProperties ptr in order to 248 // get count 249 if (UNCALLED == phy_dev_data->physicalDeviceState->vkGetPhysicalDeviceQueueFamilyPropertiesState) { 250 skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 251 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL", 252 "Invalid call sequence to vkGetPhysicalDeviceQueueFamilyProperties() w/ non-NULL " 253 "pQueueFamilyProperties. You should first call vkGetPhysicalDeviceQueueFamilyProperties() w/ " 254 "NULL pQueueFamilyProperties to query pCount."); 255 } 256 // Then verify that pCount that is passed in on second call matches what was returned 257 if (phy_dev_data->physicalDeviceState->queueFamilyPropertiesCount != *pCount) { 258 259 // TODO: this is not a requirement of the Valid Usage section for vkGetPhysicalDeviceQueueFamilyProperties, so 260 // provide as warning 261 skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, 262 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL", 263 "Call to vkGetPhysicalDeviceQueueFamilyProperties() w/ pCount value %u, but actual count " 264 "supported by this physicalDevice is %u.", 265 *pCount, phy_dev_data->physicalDeviceState->queueFamilyPropertiesCount); 266 } 267 phy_dev_data->physicalDeviceState->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS; 268 } 269 if (skipCall) 270 return; 271 phy_dev_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pCount, 272 pQueueFamilyProperties); 273 if (NULL == pQueueFamilyProperties) { 274 phy_dev_data->physicalDeviceState->queueFamilyPropertiesCount = *pCount; 275 } else { 
// Save queue family properties 276 phy_dev_data->queueFamilyProperties.reserve(*pCount); 277 for (uint32_t i = 0; i < *pCount; i++) { 278 phy_dev_data->queueFamilyProperties.emplace_back(new VkQueueFamilyProperties(pQueueFamilyProperties[i])); 279 } 280 } 281 return; 282 } else { 283 log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, 284 __LINE__, DEVLIMITS_INVALID_PHYSICAL_DEVICE, "DL", 285 "Invalid physicalDevice (0x%" PRIxLEAST64 ") passed into vkGetPhysicalDeviceQueueFamilyProperties().", 286 (uint64_t)physicalDevice); 287 } 288 } 289 290 VKAPI_ATTR void VKAPI_CALL 291 GetPhysicalDeviceMemoryProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties *pMemoryProperties) { 292 get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map) 293 ->instance_dispatch_table->GetPhysicalDeviceMemoryProperties(physicalDevice, pMemoryProperties); 294 } 295 296 VKAPI_ATTR void VKAPI_CALL 297 GetPhysicalDeviceSparseImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, 298 VkSampleCountFlagBits samples, VkImageUsageFlags usage, VkImageTiling tiling, 299 uint32_t *pNumProperties, VkSparseImageFormatProperties *pProperties) { 300 get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map) 301 ->instance_dispatch_table->GetPhysicalDeviceSparseImageFormatProperties(physicalDevice, format, type, samples, usage, 302 tiling, pNumProperties, pProperties); 303 } 304 305 VKAPI_ATTR void VKAPI_CALL 306 CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) { 307 bool skipCall = false; 308 /* TODO: Verify viewportCount < maxViewports from VkPhysicalDeviceLimits */ 309 if (!skipCall) { 310 layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 311 my_data->device_dispatch_table->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports); 312 } 
313 } 314 315 VKAPI_ATTR void VKAPI_CALL 316 CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) { 317 bool skipCall = false; 318 /* TODO: Verify scissorCount < maxViewports from VkPhysicalDeviceLimits */ 319 /* TODO: viewportCount and scissorCount must match at draw time */ 320 if (!skipCall) { 321 layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 322 my_data->device_dispatch_table->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors); 323 } 324 } 325 326 // Verify that features have been queried and verify that requested features are available 327 static bool validate_features_request(layer_data *phy_dev_data) { 328 bool skipCall = false; 329 // Verify that all of the requested features are available 330 // Get ptrs into actual and requested structs and if requested is 1 but actual is 0, request is invalid 331 VkBool32 *actual = (VkBool32 *)&(phy_dev_data->actualPhysicalDeviceFeatures); 332 VkBool32 *requested = (VkBool32 *)&(phy_dev_data->requestedPhysicalDeviceFeatures); 333 // TODO : This is a nice, compact way to loop through struct, but a bad way to report issues 334 // Need to provide the struct member name with the issue. To do that seems like we'll 335 // have to loop through each struct member which should be done w/ codegen to keep in synch. 
336 uint32_t errors = 0; 337 uint32_t totalBools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32); 338 for (uint32_t i = 0; i < totalBools; i++) { 339 if (requested[i] > actual[i]) { 340 skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 341 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED, 342 "DL", "While calling vkCreateDevice(), requesting feature #%u in VkPhysicalDeviceFeatures struct, " 343 "which is not available on this device.", 344 i); 345 errors++; 346 } 347 } 348 if (errors && (UNCALLED == phy_dev_data->physicalDeviceState->vkGetPhysicalDeviceFeaturesState)) { 349 // If user didn't request features, notify them that they should 350 // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error 351 skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 352 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED, "DL", 353 "You requested features that are unavailable on this device. 
You should first query feature " 354 "availability by calling vkGetPhysicalDeviceFeatures()."); 355 } 356 return skipCall; 357 } 358 359 VKAPI_ATTR VkResult VKAPI_CALL 360 CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo, 361 const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) { 362 bool skipCall = false; 363 layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map); 364 // First check is app has actually requested queueFamilyProperties 365 if (!phy_dev_data->physicalDeviceState) { 366 skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 367 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL", 368 "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices()."); 369 } else if (QUERY_DETAILS != phy_dev_data->physicalDeviceState->vkGetPhysicalDeviceQueueFamilyPropertiesState) { 370 // TODO: This is not called out as an invalid use in the spec so make more informative recommendation. 371 skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, 372 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, 373 "DL", "Call to vkCreateDevice() w/o first calling vkGetPhysicalDeviceQueueFamilyProperties()."); 374 } else { 375 // Check that the requested queue properties are valid 376 for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++) { 377 uint32_t requestedIndex = pCreateInfo->pQueueCreateInfos[i].queueFamilyIndex; 378 if (phy_dev_data->queueFamilyProperties.size() <= 379 requestedIndex) { // requested index is out of bounds for this physical device 380 skipCall |= log_msg( 381 phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, 382 __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL", 383 "Invalid queue create request in vkCreateDevice(). 
Invalid queueFamilyIndex %u requested.", requestedIndex); 384 } else if (pCreateInfo->pQueueCreateInfos[i].queueCount > 385 phy_dev_data->queueFamilyProperties[requestedIndex]->queueCount) { 386 skipCall |= 387 log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 388 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, 389 "DL", "Invalid queue create request in vkCreateDevice(). QueueFamilyIndex %u only has %u queues, but " 390 "requested queueCount is %u.", 391 requestedIndex, phy_dev_data->queueFamilyProperties[requestedIndex]->queueCount, 392 pCreateInfo->pQueueCreateInfos[i].queueCount); 393 } 394 } 395 } 396 // Check that any requested features are available 397 if (pCreateInfo->pEnabledFeatures) { 398 phy_dev_data->requestedPhysicalDeviceFeatures = *(pCreateInfo->pEnabledFeatures); 399 skipCall |= validate_features_request(phy_dev_data); 400 } 401 if (skipCall) 402 return VK_ERROR_VALIDATION_FAILED_EXT; 403 404 VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO); 405 406 assert(chain_info->u.pLayerInfo); 407 PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr; 408 PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr; 409 PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(phy_dev_data->instance, "vkCreateDevice"); 410 if (fpCreateDevice == NULL) { 411 return VK_ERROR_INITIALIZATION_FAILED; 412 } 413 414 // Advance the link info for the next element on the chain 415 chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext; 416 417 VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice); 418 if (result != VK_SUCCESS) { 419 return result; 420 } 421 422 layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map); 423 my_device_data->device_dispatch_table = new VkLayerDispatchTable; 424 
layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr); 425 my_device_data->report_data = layer_debug_report_create_device(phy_dev_data->report_data, *pDevice); 426 my_device_data->physicalDevice = gpu; 427 428 // Get physical device properties for this device 429 phy_dev_data->instance_dispatch_table->GetPhysicalDeviceProperties(gpu, &(my_device_data->physicalDeviceProperties)); 430 return result; 431 } 432 433 VKAPI_ATTR void VKAPI_CALL 434 DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) { 435 // Free device lifetime allocations 436 dispatch_key key = get_dispatch_key(device); 437 layer_data *my_device_data = get_my_data_ptr(key, layer_data_map); 438 my_device_data->device_dispatch_table->DestroyDevice(device, pAllocator); 439 delete my_device_data->device_dispatch_table; 440 layer_data_map.erase(key); 441 } 442 443 VKAPI_ATTR VkResult VKAPI_CALL 444 CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo, 445 const VkAllocationCallbacks *pAllocator, 446 VkRenderPass *pRenderPass) { 447 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 448 bool skip_call = false; 449 uint32_t max_color_attachments = dev_data->physicalDeviceProperties.limits.maxColorAttachments; 450 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { 451 if (pCreateInfo->pSubpasses[i].colorAttachmentCount > max_color_attachments) { 452 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 453 reinterpret_cast<uint64_t>(device), __LINE__, DEVLIMITS_INVALID_ATTACHMENT_COUNT, "DL", 454 "Cannot create a render pass with %d color attachments. 
Max is %d.", 455 pCreateInfo->pSubpasses[i].colorAttachmentCount, max_color_attachments); 456 } 457 } 458 if (skip_call) { 459 return VK_ERROR_VALIDATION_FAILED_EXT; 460 } 461 return dev_data->device_dispatch_table->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass); 462 } 463 464 VKAPI_ATTR VkResult VKAPI_CALL 465 CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo, 466 const VkAllocationCallbacks *pAllocator, 467 VkCommandPool *pCommandPool) { 468 // TODO : Verify that requested QueueFamilyIndex for this pool exists 469 VkResult result = get_my_data_ptr(get_dispatch_key(device), layer_data_map) 470 ->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool); 471 return result; 472 } 473 474 VKAPI_ATTR void VKAPI_CALL 475 DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) { 476 get_my_data_ptr(get_dispatch_key(device), layer_data_map) 477 ->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator); 478 } 479 480 VKAPI_ATTR VkResult VKAPI_CALL 481 ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) { 482 VkResult result = get_my_data_ptr(get_dispatch_key(device), layer_data_map) 483 ->device_dispatch_table->ResetCommandPool(device, commandPool, flags); 484 return result; 485 } 486 487 VKAPI_ATTR VkResult VKAPI_CALL 488 AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) { 489 VkResult result = get_my_data_ptr(get_dispatch_key(device), layer_data_map) 490 ->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer); 491 return result; 492 } 493 494 VKAPI_ATTR void VKAPI_CALL 495 FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t count, const VkCommandBuffer *pCommandBuffers) { 496 get_my_data_ptr(get_dispatch_key(device), layer_data_map) 497 
->device_dispatch_table->FreeCommandBuffers(device, commandPool, count, pCommandBuffers); 498 } 499 500 VKAPI_ATTR VkResult VKAPI_CALL 501 BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) { 502 bool skipCall = false; 503 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 504 layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(dev_data->physicalDevice), layer_data_map); 505 const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo; 506 if (phy_dev_data->actualPhysicalDeviceFeatures.inheritedQueries == VK_FALSE && pInfo && pInfo->occlusionQueryEnable != VK_FALSE) { 507 skipCall |= log_msg( 508 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 509 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DEVLIMITS_INVALID_INHERITED_QUERY, "DL", 510 "Cannot set inherited occlusionQueryEnable in vkBeginCommandBuffer() when device does not support inheritedQueries."); 511 } 512 if (phy_dev_data->actualPhysicalDeviceFeatures.inheritedQueries != VK_FALSE && pInfo && pInfo->occlusionQueryEnable != VK_FALSE && 513 !validate_VkQueryControlFlagBits(VkQueryControlFlagBits(pInfo->queryFlags))) { 514 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 515 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DEVLIMITS_INVALID_INHERITED_QUERY, "DL", 516 "Cannot enable in occlusion queries in vkBeginCommandBuffer() and set queryFlags to %d which is not a " 517 "valid combination of VkQueryControlFlagBits.", 518 pInfo->queryFlags); 519 } 520 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 521 if (!skipCall) 522 result = dev_data->device_dispatch_table->BeginCommandBuffer(commandBuffer, pBeginInfo); 523 return result; 524 } 525 526 VKAPI_ATTR void VKAPI_CALL 527 GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) { 
528 bool skipCall = false; 529 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 530 VkPhysicalDevice gpu = dev_data->physicalDevice; 531 layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map); 532 if (queueFamilyIndex >= 533 phy_dev_data->queueFamilyProperties.size()) { // requested index is out of bounds for this physical device 534 skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 535 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, 536 "DL", "Invalid queueFamilyIndex %u requested in vkGetDeviceQueue().", queueFamilyIndex); 537 } else if (queueIndex >= phy_dev_data->queueFamilyProperties[queueFamilyIndex]->queueCount) { 538 skipCall |= log_msg( 539 phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, 540 DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL", 541 "Invalid queue request in vkGetDeviceQueue(). 
QueueFamilyIndex %u only has %u queues, but requested queueIndex is %u.", 542 queueFamilyIndex, phy_dev_data->queueFamilyProperties[queueFamilyIndex]->queueCount, queueIndex); 543 } 544 if (!skipCall) 545 dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue); 546 } 547 548 VKAPI_ATTR void VKAPI_CALL 549 UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites, 550 uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) { 551 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 552 bool skipCall = false; 553 554 for (uint32_t i = 0; i < descriptorWriteCount; i++) { 555 if ((pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) || 556 (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)) { 557 VkDeviceSize uniformAlignment = dev_data->physicalDeviceProperties.limits.minUniformBufferOffsetAlignment; 558 for (uint32_t j = 0; j < pDescriptorWrites[i].descriptorCount; j++) { 559 if (vk_safe_modulo(pDescriptorWrites[i].pBufferInfo[j].offset, uniformAlignment) != 0) { 560 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 561 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, 562 DEVLIMITS_INVALID_UNIFORM_BUFFER_OFFSET, "DL", 563 "vkUpdateDescriptorSets(): pDescriptorWrites[%d].pBufferInfo[%d].offset (0x%" PRIxLEAST64 564 ") must be a multiple of device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64, 565 i, j, pDescriptorWrites[i].pBufferInfo[j].offset, uniformAlignment); 566 } 567 } 568 } else if ((pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER) || 569 (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) { 570 VkDeviceSize storageAlignment = dev_data->physicalDeviceProperties.limits.minStorageBufferOffsetAlignment; 571 for (uint32_t j = 0; j < pDescriptorWrites[i].descriptorCount; 
j++) { 572 if (vk_safe_modulo(pDescriptorWrites[i].pBufferInfo[j].offset, storageAlignment) != 0) { 573 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 574 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, 575 DEVLIMITS_INVALID_STORAGE_BUFFER_OFFSET, "DL", 576 "vkUpdateDescriptorSets(): pDescriptorWrites[%d].pBufferInfo[%d].offset (0x%" PRIxLEAST64 577 ") must be a multiple of device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64, 578 i, j, pDescriptorWrites[i].pBufferInfo[j].offset, storageAlignment); 579 } 580 } 581 } 582 } 583 if (!skipCall) { 584 dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, 585 pDescriptorCopies); 586 } 587 } 588 589 VKAPI_ATTR void VKAPI_CALL 590 CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, 591 VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) { 592 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 593 594 // dstOffset is the byte offset into the buffer to start updating and must be a multiple of 4. 595 if (dstOffset & 3) { 596 layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 597 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__, 598 DEVLIMITS_INVALID_BUFFER_UPDATE_ALIGNMENT, "DL", 599 "vkCmdUpdateBuffer parameter, VkDeviceSize dstOffset, is not a multiple of 4")) { 600 return; 601 } 602 } 603 604 // dataSize is the number of bytes to update, which must be a multiple of 4. 
605 if (dataSize & 3) { 606 layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 607 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__, 608 DEVLIMITS_INVALID_BUFFER_UPDATE_ALIGNMENT, "DL", 609 "vkCmdUpdateBuffer parameter, VkDeviceSize dataSize, is not a multiple of 4")) { 610 return; 611 } 612 } 613 614 dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData); 615 } 616 617 VKAPI_ATTR void VKAPI_CALL 618 CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) { 619 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 620 621 // dstOffset is the byte offset into the buffer to start filling and must be a multiple of 4. 622 if (dstOffset & 3) { 623 layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 624 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__, 625 DEVLIMITS_INVALID_BUFFER_UPDATE_ALIGNMENT, "DL", 626 "vkCmdFillBuffer parameter, VkDeviceSize dstOffset, is not a multiple of 4")) { 627 return; 628 } 629 } 630 631 // size is the number of bytes to fill, which must be a multiple of 4. 
632 if (size & 3) { 633 layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 634 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__, 635 DEVLIMITS_INVALID_BUFFER_UPDATE_ALIGNMENT, "DL", 636 "vkCmdFillBuffer parameter, VkDeviceSize size, is not a multiple of 4")) { 637 return; 638 } 639 } 640 641 dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data); 642 } 643 644 VKAPI_ATTR VkResult VKAPI_CALL 645 CreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo, 646 const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) { 647 layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map); 648 VkResult res = my_data->instance_dispatch_table->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback); 649 if (VK_SUCCESS == res) { 650 res = layer_create_msg_callback(my_data->report_data, pCreateInfo, pAllocator, pMsgCallback); 651 } 652 return res; 653 } 654 655 VKAPI_ATTR void VKAPI_CALL 656 DestroyDebugReportCallbackEXT(VkInstance instance, 657 VkDebugReportCallbackEXT msgCallback, 658 const VkAllocationCallbacks *pAllocator) { 659 layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map); 660 my_data->instance_dispatch_table->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator); 661 layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator); 662 } 663 664 VKAPI_ATTR void VKAPI_CALL 665 DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object, 666 size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) { 667 layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map); 668 my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, 
msgCode, pLayerPrefix, 669 pMsg); 670 } 671 672 VKAPI_ATTR VkResult VKAPI_CALL 673 EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, 674 const char *pLayerName, uint32_t *pCount, 675 VkExtensionProperties *pProperties) { 676 if (pLayerName && !strcmp(pLayerName, global_layer.layerName)) 677 return util_GetExtensionProperties(0, nullptr, pCount, pProperties); 678 679 assert(physicalDevice); 680 681 dispatch_key key = get_dispatch_key(physicalDevice); 682 layer_data *my_data = get_my_data_ptr(key, layer_data_map); 683 return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, pLayerName, pCount, pProperties); 684 } 685 686 static PFN_vkVoidFunction 687 intercept_core_instance_command(const char *name); 688 689 static PFN_vkVoidFunction 690 intercept_core_device_command(const char *name); 691 692 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL 693 GetDeviceProcAddr(VkDevice dev, const char *funcName) { 694 PFN_vkVoidFunction proc = intercept_core_device_command(funcName); 695 if (proc) 696 return proc; 697 698 assert(dev); 699 700 layer_data *my_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map); 701 VkLayerDispatchTable *pTable = my_data->device_dispatch_table; 702 { 703 if (pTable->GetDeviceProcAddr == NULL) 704 return NULL; 705 return pTable->GetDeviceProcAddr(dev, funcName); 706 } 707 } 708 709 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL 710 GetInstanceProcAddr(VkInstance instance, const char *funcName) { 711 PFN_vkVoidFunction proc = intercept_core_instance_command(funcName); 712 if (!proc) 713 intercept_core_device_command(funcName); 714 if (proc) 715 return proc; 716 717 layer_data *my_data; 718 719 assert(instance); 720 my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map); 721 722 proc = debug_report_get_instance_proc_addr(my_data->report_data, funcName); 723 if (proc) 724 return proc; 725 726 { 727 VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table; 728 if 
        (pTable->GetInstanceProcAddr == NULL)
            return NULL;
        return pTable->GetInstanceProcAddr(instance, funcName);
    }
}

// Map the names of the core instance-level commands this layer intercepts to
// the layer's implementations; returns the matching function pointer, or falls
// through (see continuation) for unrecognized names.
static PFN_vkVoidFunction
intercept_core_instance_command(const char *name) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } core_instance_commands[] = {
        { "vkGetInstanceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetInstanceProcAddr) },
        { "vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr) },
        { "vkCreateInstance", reinterpret_cast<PFN_vkVoidFunction>(CreateInstance) },
        { "vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance) },
        { "vkCreateDevice", reinterpret_cast<PFN_vkVoidFunction>(CreateDevice) },
        { "vkEnumeratePhysicalDevices", reinterpret_cast<PFN_vkVoidFunction>(EnumeratePhysicalDevices) },
        { "vkGetPhysicalDeviceFeatures", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceFeatures) },
        { "vkGetPhysicalDeviceFormatProperties", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceFormatProperties) },
        { "vkGetPhysicalDeviceImageFormatProperties", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceImageFormatProperties) },
        { "vkGetPhysicalDeviceProperties", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceProperties) },
        { "vkGetPhysicalDeviceQueueFamilyProperties", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceQueueFamilyProperties) },
        { "vkGetPhysicalDeviceMemoryProperties", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceMemoryProperties) },
        { "vkGetPhysicalDeviceSparseImageFormatProperties", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSparseImageFormatProperties) },
        { "vkEnumerateDeviceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceExtensionProperties) },
    };

    // we should never be queried for these commands
    assert(strcmp(name, "vkEnumerateInstanceLayerProperties") &&
           strcmp(name,
           "vkEnumerateInstanceExtensionProperties") &&
           strcmp(name, "vkEnumerateDeviceLayerProperties"));

    // Linear scan is adequate: the table is small and lookups happen only
    // during *ProcAddr resolution, not on hot command paths.
    for (size_t i = 0; i < ARRAY_SIZE(core_instance_commands); i++) {
        if (!strcmp(core_instance_commands[i].name, name))
            return core_instance_commands[i].proc;
    }

    return nullptr;
}

// Map the names of the core device-level commands this layer intercepts to
// the layer's implementations; returns nullptr for any other name.
static PFN_vkVoidFunction
intercept_core_device_command(const char *name) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } core_device_commands[] = {
        { "vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr) },
        { "vkDestroyDevice", reinterpret_cast<PFN_vkVoidFunction>(DestroyDevice) },
        { "vkGetDeviceQueue", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceQueue) },
        { "vkCreateRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CreateRenderPass) },
        { "vkCreateCommandPool", reinterpret_cast<PFN_vkVoidFunction>(CreateCommandPool) },
        { "vkDestroyCommandPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyCommandPool) },
        { "vkResetCommandPool", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandPool) },
        { "vkAllocateCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(AllocateCommandBuffers) },
        { "vkFreeCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(FreeCommandBuffers) },
        { "vkBeginCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(BeginCommandBuffer) },
        { "vkCmdUpdateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdUpdateBuffer) },
        { "vkUpdateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(UpdateDescriptorSets) },
        { "vkCmdFillBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdFillBuffer) },
        { "vkCmdSetScissor", reinterpret_cast<PFN_vkVoidFunction>(CmdSetScissor) },
        { "vkCmdSetViewport", reinterpret_cast<PFN_vkVoidFunction>(CmdSetViewport) },
    };

    for (size_t i = 0; i < ARRAY_SIZE(core_device_commands); i++) {
        if (!strcmp(core_device_commands[i].name, name))
            return core_device_commands[i].proc;
    }

    return
        nullptr;
}

} // namespace device_limits

// vk_layer_logging.h expects these to be defined

// Exported C entry points that simply forward into the layer's namespaced
// implementations.
VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
    return device_limits::CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
}

VKAPI_ATTR void VKAPI_CALL
vkDestroyDebugReportCallbackEXT(VkInstance instance,
                                VkDebugReportCallbackEXT msgCallback,
                                const VkAllocationCallbacks *pAllocator) {
    device_limits::DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    device_limits::DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
}

// loader-layer interface v0

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &device_limits::global_layer, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
    // physicalDevice is intentionally ignored: this layer reports the same
    // single-layer list regardless of device.
    return util_GetLayerProperties(1, &device_limits::global_layer, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
    return util_GetExtensionProperties(1, device_limits::instance_extensions, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                     const char *pLayerName, uint32_t *pCount,
                                     VkExtensionProperties *pProperties) {
    // the layer command handles VK_NULL_HANDLE just fine
    return device_limits::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}

// Exported device-level dispatch entry point; forwards into the layer.
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    return device_limits::GetDeviceProcAddr(dev, funcName);
}

// Loader entry point. The enumeration commands must be resolvable even with a
// null instance, so they are answered here before dispatching into the layer.
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    if (!strcmp(funcName, "vkEnumerateInstanceLayerProperties"))
        return reinterpret_cast<PFN_vkVoidFunction>(vkEnumerateInstanceLayerProperties);
    if (!strcmp(funcName, "vkEnumerateDeviceLayerProperties"))
        return reinterpret_cast<PFN_vkVoidFunction>(vkEnumerateDeviceLayerProperties);
    if (!strcmp(funcName, "vkEnumerateInstanceExtensionProperties"))
        return reinterpret_cast<PFN_vkVoidFunction>(vkEnumerateInstanceExtensionProperties);
    if (!strcmp(funcName, "vkGetInstanceProcAddr"))
        return reinterpret_cast<PFN_vkVoidFunction>(vkGetInstanceProcAddr);

    return device_limits::GetInstanceProcAddr(instance, funcName);
}