/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "VkTestUtils.h"

#ifdef SK_VULKAN

#include "SkAutoMalloc.h"
#include "vk/GrVkBackendContext.h"
#include "vk/GrVkExtensions.h"
#include "../ports/SkOSLibrary.h"

namespace sk_gpu_test {

bool LoadVkLibraryAndGetProcAddrFuncs(PFN_vkGetInstanceProcAddr* instProc,
                                      PFN_vkGetDeviceProcAddr* devProc) {
#ifdef SK_MOLTENVK
    // MoltenVK is a statically linked framework, so there is no Vulkan library to load.
    *instProc = &vkGetInstanceProcAddr;
    *devProc = &vkGetDeviceProcAddr;
    return true;
#else
    static void* vkLib = nullptr;
    static PFN_vkGetInstanceProcAddr localInstProc = nullptr;
    static PFN_vkGetDeviceProcAddr localDevProc = nullptr;
    if (!vkLib) {
#if defined _WIN32
        vkLib = DynamicLoadLibrary("vulkan-1.dll");
#else
        vkLib = DynamicLoadLibrary("libvulkan.so");
#endif
        if (!vkLib) {
            return false;
        }
        localInstProc = (PFN_vkGetInstanceProcAddr) GetProcedureAddress(vkLib,
                                                                        "vkGetInstanceProcAddr");
        localDevProc = (PFN_vkGetDeviceProcAddr) GetProcedureAddress(vkLib,
                                                                     "vkGetDeviceProcAddr");
    }
    if (!localInstProc || !localDevProc) {
        return false;
    }
    *instProc = localInstProc;
    *devProc = localDevProc;
    return true;
#endif
}

////////////////////////////////////////////////////////////////////////////////
// Helper code to set up Vulkan context objects

#ifdef SK_ENABLE_VK_LAYERS
const char* kDebugLayerNames[] = {
    // elements of VK_LAYER_LUNARG_standard_validation
    "VK_LAYER_GOOGLE_threading",
    "VK_LAYER_LUNARG_parameter_validation",
    "VK_LAYER_LUNARG_object_tracker",
    "VK_LAYER_LUNARG_core_validation",
    "VK_LAYER_GOOGLE_unique_objects",
    // not included in standard_validation
    //"VK_LAYER_LUNARG_api_dump",
    //"VK_LAYER_LUNARG_vktrace",
    //"VK_LAYER_LUNARG_screenshot",
};

static uint32_t remove_patch_version(uint32_t specVersion) {
    return (specVersion >> 12) << 12;
}

// Returns the index into the layers array for the layer we want. Returns -1 if not supported.
static int should_include_debug_layer(const char* layerName,
                                      uint32_t layerCount, VkLayerProperties* layers,
                                      uint32_t version) {
    for (uint32_t i = 0; i < layerCount; ++i) {
        if (!strcmp(layerName, layers[i].layerName)) {
            // Since the layers intercept the Vulkan calls and forward them on, we need to make
            // sure the layer was written against a version that isn't older than the version of
            // Vulkan we're using so that it has all the api entry points.
            if (version <= remove_patch_version(layers[i].specVersion)) {
                return i;
            }
            return -1;
        }
    }
    return -1;
}

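// Debug-report callback registered via VK_EXT_debug_report when the validation layers are
// enabled. It routes messages to SkDebugf by severity: errors return VK_TRUE (skip further
// layers), everything else returns VK_FALSE so the triggering call proceeds normally.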
VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
        VkDebugReportFlagsEXT flags,
        VkDebugReportObjectTypeEXT objectType,
        uint64_t object,
        size_t location,
        int32_t messageCode,
        const char* pLayerPrefix,
        const char* pMessage,
        void* pUserData) {
    if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
        SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        return VK_TRUE; // skip further layers
    } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
        // There is currently a bug in the spec which doesn't have
        // VK_STRUCTURE_TYPE_BLEND_OPERATION_ADVANCED_FEATURES_EXT as an allowable pNext struct in
        // VkDeviceCreateInfo. So we ignore that warning since it is wrong.
        if (!strstr(pMessage,
                    "pCreateInfo->pNext chain includes a structure with unexpected VkStructureType "
                    "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT")) {
            SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        }
    } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
        SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else {
        SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    }
    return VK_FALSE;
}
#endif

#define GET_PROC_LOCAL(F, inst, device) PFN_vk ## F F = (PFN_vk ## F) getProc("vk" #F, inst, device)

static bool init_instance_extensions_and_layers(GrVkGetProc getProc,
                                                uint32_t specVersion,
                                                SkTArray<VkExtensionProperties>* instanceExtensions,
                                                SkTArray<VkLayerProperties>* instanceLayers) {
    if (getProc == nullptr) {
        return false;
    }

    GET_PROC_LOCAL(EnumerateInstanceExtensionProperties, VK_NULL_HANDLE, VK_NULL_HANDLE);
    GET_PROC_LOCAL(EnumerateInstanceLayerProperties, VK_NULL_HANDLE, VK_NULL_HANDLE);

    if (!EnumerateInstanceExtensionProperties ||
        !EnumerateInstanceLayerProperties) {
        return false;
    }

    VkResult res;
    uint32_t layerCount = 0;
#ifdef SK_ENABLE_VK_LAYERS
    // instance layers
    res = EnumerateInstanceLayerProperties(&layerCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
    res = EnumerateInstanceLayerProperties(&layerCount, layers);
    if (VK_SUCCESS != res) {
        delete[] layers;
        return false;
    }

    uint32_t nonPatchVersion = remove_patch_version(specVersion);
    for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
        int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
                                             nonPatchVersion);
        if (idx != -1) {
            instanceLayers->push_back() = layers[idx];
        }
    }
    delete[] layers;
#endif

    // instance extensions
    // via Vulkan implementation and implicitly enabled layers
    uint32_t extensionCount = 0;
    res = EnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
    res = EnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions);
    if (VK_SUCCESS != res) {
        delete[] extensions;
        return false;
    }
    for (uint32_t i = 0; i < extensionCount; ++i) {
        instanceExtensions->push_back() = extensions[i];
    }
    delete[] extensions;

    // via explicitly enabled layers
    layerCount = instanceLayers->count();
    for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
        uint32_t extensionCount = 0;
        res = EnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
                                                   &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = EnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
                                                   &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            instanceExtensions->push_back() = extensions[i];
        }
        delete[] extensions;
    }

    return true;
}

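// Device-level counterpart of init_instance_extensions_and_layers(): enumerates the layers and
// extensions exposed by the physical device (and by any explicitly enabled layers), using the
// usual count-then-fill enumeration pattern.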
static bool init_device_extensions_and_layers(GrVkGetProc getProc, uint32_t specVersion,
                                              VkInstance inst, VkPhysicalDevice physDev,
                                              SkTArray<VkExtensionProperties>* deviceExtensions,
                                              SkTArray<VkLayerProperties>* deviceLayers) {
    if (getProc == nullptr) {
        return false;
    }

    GET_PROC_LOCAL(EnumerateDeviceExtensionProperties, inst, VK_NULL_HANDLE);
    GET_PROC_LOCAL(EnumerateDeviceLayerProperties, inst, VK_NULL_HANDLE);

    if (!EnumerateDeviceExtensionProperties ||
        !EnumerateDeviceLayerProperties) {
        return false;
    }

    VkResult res;
    // device layers
    uint32_t layerCount = 0;
#ifdef SK_ENABLE_VK_LAYERS
    res = EnumerateDeviceLayerProperties(physDev, &layerCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
    res = EnumerateDeviceLayerProperties(physDev, &layerCount, layers);
    if (VK_SUCCESS != res) {
        delete[] layers;
        return false;
    }

    uint32_t nonPatchVersion = remove_patch_version(specVersion);
    for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
        int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
                                             nonPatchVersion);
        if (idx != -1) {
            deviceLayers->push_back() = layers[idx];
        }
    }
    delete[] layers;
#endif

    // device extensions
    // via Vulkan implementation and implicitly enabled layers
    uint32_t extensionCount = 0;
    res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
    res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, extensions);
    if (VK_SUCCESS != res) {
        delete[] extensions;
        return false;
    }
    for (uint32_t i = 0; i < extensionCount; ++i) {
        deviceExtensions->push_back() = extensions[i];
    }
    delete[] extensions;

    // via explicitly enabled layers
    layerCount = deviceLayers->count();
    for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
        uint32_t extensionCount = 0;
        res = EnumerateDeviceExtensionProperties(physDev,
                                                 (*deviceLayers)[layerIndex].layerName,
                                                 &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = EnumerateDeviceExtensionProperties(physDev,
                                                 (*deviceLayers)[layerIndex].layerName,
                                                 &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            deviceExtensions->push_back() = extensions[i];
        }
        delete[] extensions;
    }

    return true;
}

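// Helper macros for pulling Vulkan entry points through the client-supplied GrVkGetProc.
// ACQUIRE_VK_PROC_NOCHECK just stores the pointer (which may be null). ACQUIRE_VK_PROC logs and
// returns false from the enclosing function on failure (destroying the instance first when a
// device handle was passed). ACQUIRE_VK_PROC_LOCAL is for void helpers that simply bail out.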
#define ACQUIRE_VK_PROC_NOCHECK(name, instance, device) \
    PFN_vk##name grVk##name = reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device))

#define ACQUIRE_VK_PROC(name, instance, device) \
    PFN_vk##name grVk##name = \
        reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do { \
        if (grVk##name == nullptr) { \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
            if (device != VK_NULL_HANDLE) { \
                destroy_instance(getProc, inst, debugCallback, hasDebugExtension); \
            } \
            return false; \
        } \
    } while (0)

#define ACQUIRE_VK_PROC_LOCAL(name, instance, device) \
    PFN_vk##name grVk##name = \
        reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do { \
        if (grVk##name == nullptr) { \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
            return; \
        } \
    } while (0)

static void destroy_instance(GrVkGetProc getProc, VkInstance inst,
                             VkDebugReportCallbackEXT* debugCallback,
                             bool hasDebugExtension) {
    if (hasDebugExtension && *debugCallback != VK_NULL_HANDLE) {
        ACQUIRE_VK_PROC_LOCAL(DestroyDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
        grVkDestroyDebugReportCallbackEXT(inst, *debugCallback, nullptr);
        *debugCallback = VK_NULL_HANDLE;
    }
    ACQUIRE_VK_PROC_LOCAL(DestroyInstance, inst, VK_NULL_HANDLE);
    grVkDestroyInstance(inst, nullptr);
}

static void setup_extension_features(GrVkGetProc getProc, VkInstance inst, VkPhysicalDevice physDev,
                                     uint32_t physDeviceVersion, GrVkExtensions* extensions,
                                     VkPhysicalDeviceFeatures2* features) {
    SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
             extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1));

    // Setup all extension feature structs we may want to use.

    void** tailPNext = &features->pNext;

    VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend = nullptr;
    if (extensions->hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME, 1)) {
        ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
        ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
        ycbcrFeature->pNext = nullptr;
        *tailPNext = ycbcrFeature;
        tailPNext = &ycbcrFeature->pNext;
    }

    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2(physDev, features);
    } else {
        SkASSERT(extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
                                          1));
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2KHR, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2KHR(physDev, features);
    }

    // If we want to disable any extension features do so here.
}

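// Creates a Vulkan instance and logical device for testing and fills out |ctx|:
//   1) determine the instance version and pick an apiVersion (1.1 when available, else 1.0),
//   2) enable instance layers/extensions (validation layers when SK_ENABLE_VK_LAYERS is defined),
//   3) take the first physical device and find a graphics queue family (and, if a CanPresentFn
//      was supplied, a present queue family),
//   4) enable device layers/extensions and query extension feature structs,
//   5) create the VkDevice and store everything in the GrVkBackendContext.
// fOwnsInstanceAndDevice is left false, so the caller is responsible for destroying the instance
// and device and for releasing the chained feature structs via FreeVulkanFeaturesStructs().
//
// Rough usage sketch (the getProc shown here is a hypothetical wrapper built from the pointers
// returned by LoadVkLibraryAndGetProcAddrFuncs; it is not defined in this file):
//
//   GrVkBackendContext ctx;
//   GrVkExtensions extensions;
//   VkPhysicalDeviceFeatures2 features;
//   VkDebugReportCallbackEXT debugCallback;
//   if (sk_gpu_test::CreateVkBackendContext(getProc, &ctx, &extensions, &features,
//                                           &debugCallback, nullptr, nullptr)) {
//       // ... use ctx ...
//       sk_gpu_test::FreeVulkanFeaturesStructs(&features);
//   }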
bool CreateVkBackendContext(GrVkGetProc getProc,
                            GrVkBackendContext* ctx,
                            GrVkExtensions* extensions,
                            VkPhysicalDeviceFeatures2* features,
                            VkDebugReportCallbackEXT* debugCallback,
                            uint32_t* presentQueueIndexPtr,
                            CanPresentFn canPresent) {
    VkResult err;

    ACQUIRE_VK_PROC_NOCHECK(EnumerateInstanceVersion, VK_NULL_HANDLE, VK_NULL_HANDLE);
    uint32_t instanceVersion = 0;
    if (!grVkEnumerateInstanceVersion) {
        instanceVersion = VK_MAKE_VERSION(1, 0, 0);
    } else {
        err = grVkEnumerateInstanceVersion(&instanceVersion);
        if (err) {
            SkDebugf("failed to enumerate instance version. Err: %d\n", err);
            return false;
        }
    }
    SkASSERT(instanceVersion >= VK_MAKE_VERSION(1, 0, 0));
    uint32_t apiVersion = VK_MAKE_VERSION(1, 0, 0);
    if (instanceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        // If the instance version is 1.0 we must have the apiVersion also be 1.0. However, if the
        // instance version is 1.1 or higher, we can set the apiVersion to be whatever the highest
        // api we may use in skia (technically it can be arbitrary). So for now we set it to 1.1
        // since that is the highest Vulkan version.
        apiVersion = VK_MAKE_VERSION(1, 1, 0);
    }

    instanceVersion = SkTMin(instanceVersion, apiVersion);

    VkPhysicalDevice physDev;
    VkDevice device;
    VkInstance inst;

    const VkApplicationInfo app_info = {
        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
        nullptr,                            // pNext
        "vktest",                           // pApplicationName
        0,                                  // applicationVersion
        "vktest",                           // pEngineName
        0,                                  // engineVersion
        apiVersion,                         // apiVersion
    };

    SkTArray<VkLayerProperties> instanceLayers;
    SkTArray<VkExtensionProperties> instanceExtensions;

    if (!init_instance_extensions_and_layers(getProc, instanceVersion,
                                             &instanceExtensions,
                                             &instanceLayers)) {
        return false;
    }

    SkTArray<const char*> instanceLayerNames;
    SkTArray<const char*> instanceExtensionNames;
    for (int i = 0; i < instanceLayers.count(); ++i) {
        instanceLayerNames.push_back(instanceLayers[i].layerName);
    }
    for (int i = 0; i < instanceExtensions.count(); ++i) {
        if (strncmp(instanceExtensions[i].extensionName, "VK_KHX", 6)) {
            instanceExtensionNames.push_back(instanceExtensions[i].extensionName);
        }
    }

    const VkInstanceCreateInfo instance_create = {
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,    // sType
        nullptr,                                   // pNext
        0,                                         // flags
        &app_info,                                 // pApplicationInfo
        (uint32_t) instanceLayerNames.count(),     // enabledLayerNameCount
        instanceLayerNames.begin(),                // ppEnabledLayerNames
        (uint32_t) instanceExtensionNames.count(), // enabledExtensionNameCount
        instanceExtensionNames.begin(),            // ppEnabledExtensionNames
    };

    bool hasDebugExtension = false;

    ACQUIRE_VK_PROC(CreateInstance, VK_NULL_HANDLE, VK_NULL_HANDLE);
    err = grVkCreateInstance(&instance_create, nullptr, &inst);
    if (err < 0) {
        SkDebugf("vkCreateInstance failed: %d\n", err);
        return false;
    }

#ifdef SK_ENABLE_VK_LAYERS
    *debugCallback = VK_NULL_HANDLE;
    for (int i = 0; i < instanceExtensionNames.count() && !hasDebugExtension; ++i) {
        if (!strcmp(instanceExtensionNames[i], VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
            hasDebugExtension = true;
        }
    }
    if (hasDebugExtension) {
        // Setup callback creation information
        VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
        callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
        callbackCreateInfo.pNext = nullptr;
        callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
                                   VK_DEBUG_REPORT_WARNING_BIT_EXT |
                                   // VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
                                   // VK_DEBUG_REPORT_DEBUG_BIT_EXT |
                                   VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
        callbackCreateInfo.pfnCallback = &DebugReportCallback;
        callbackCreateInfo.pUserData = nullptr;

        ACQUIRE_VK_PROC(CreateDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
        // Register the callback
        grVkCreateDebugReportCallbackEXT(inst, &callbackCreateInfo, nullptr, debugCallback);
    }
#endif

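    // Acquire the instance-level entry points used below for physical device selection, device
    // creation, and queue retrieval.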
    ACQUIRE_VK_PROC(EnumeratePhysicalDevices, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceProperties, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceQueueFamilyProperties, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceFeatures, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(CreateDevice, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetDeviceQueue, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(DeviceWaitIdle, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(DestroyDevice, inst, VK_NULL_HANDLE);

    uint32_t gpuCount;
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    if (!gpuCount) {
        SkDebugf("vkEnumeratePhysicalDevices returned no supported devices.\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    // Just returning the first physical device instead of getting the whole array.
    // TODO: find best match for our needs
    gpuCount = 1;
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, &physDev);
    // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
    if (err && VK_INCOMPLETE != err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    VkPhysicalDeviceProperties physDeviceProperties;
    grVkGetPhysicalDeviceProperties(physDev, &physDeviceProperties);
    uint32_t physDeviceVersion = SkTMin(physDeviceProperties.apiVersion, apiVersion);

    // query to get the initial queue props size
    uint32_t queueCount;
    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
    if (!queueCount) {
        SkDebugf("vkGetPhysicalDeviceQueueFamilyProperties returned no queues.\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
    // now get the actual queue props
    VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();

    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);

    // iterate to find the graphics queue
    uint32_t graphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphicsQueueIndex = i;
            break;
        }
    }
    if (graphicsQueueIndex == queueCount) {
        SkDebugf("Could not find any supported graphics queues.\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    // iterate to find the present queue, if needed
    uint32_t presentQueueIndex = queueCount;
    if (presentQueueIndexPtr && canPresent) {
        for (uint32_t i = 0; i < queueCount; i++) {
            if (canPresent(inst, physDev, i)) {
                presentQueueIndex = i;
                break;
            }
        }
        if (presentQueueIndex == queueCount) {
            SkDebugf("Could not find any supported present queues.\n");
            destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
            return false;
        }
        *presentQueueIndexPtr = presentQueueIndex;
    } else {
        // Just set this so we end up creating a single queue for graphics, since there was no
        // request for a present queue.
        presentQueueIndex = graphicsQueueIndex;
    }

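    // With the queue families chosen, gather the device layers/extensions and the feature structs
    // that will be enabled at device creation.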
    SkTArray<VkLayerProperties> deviceLayers;
    SkTArray<VkExtensionProperties> deviceExtensions;
    if (!init_device_extensions_and_layers(getProc, physDeviceVersion,
                                           inst, physDev,
                                           &deviceExtensions,
                                           &deviceLayers)) {
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    SkTArray<const char*> deviceLayerNames;
    SkTArray<const char*> deviceExtensionNames;
    for (int i = 0; i < deviceLayers.count(); ++i) {
        deviceLayerNames.push_back(deviceLayers[i].layerName);
    }
    for (int i = 0; i < deviceExtensions.count(); ++i) {
        // Don't use experimental extensions since they typically don't work with debug layers and
        // are often missing dependency requirements for other extensions. Additionally, these are
        // often left behind in the driver even after they've been promoted to real extensions.
        if (strncmp(deviceExtensions[i].extensionName, "VK_KHX", 6) &&
            strncmp(deviceExtensions[i].extensionName, "VK_NVX", 6)) {
            deviceExtensionNames.push_back(deviceExtensions[i].extensionName);
        }
    }

    extensions->init(getProc, inst, physDev,
                     (uint32_t) instanceExtensionNames.count(),
                     instanceExtensionNames.begin(),
                     (uint32_t) deviceExtensionNames.count(),
                     deviceExtensionNames.begin());

    memset(features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features->pNext = nullptr;

    VkPhysicalDeviceFeatures* deviceFeatures = &features->features;
    void* pointerToFeatures = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1)) {
        setup_extension_features(getProc, inst, physDev, physDeviceVersion, extensions, features);
        // If we set the pNext of the VkDeviceCreateInfo to our VkPhysicalDeviceFeatures2 struct,
        // the device creation will use that instead of the ppEnabledFeatures.
        pointerToFeatures = features;
    } else {
        grVkGetPhysicalDeviceFeatures(physDev, deviceFeatures);
    }

    // this looks like it would slow things down,
    // and we can't depend on it on all platforms
    deviceFeatures->robustBufferAccess = VK_FALSE;

    float queuePriorities[1] = { 0.0 };
    // Here we assume no need for swapchain queue
    // If one is needed, the client will need its own setup code
    const VkDeviceQueueCreateInfo queueInfo[2] = {
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            0,                                          // VkDeviceQueueCreateFlags
            graphicsQueueIndex,                         // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        },
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            0,                                          // VkDeviceQueueCreateFlags
            presentQueueIndex,                          // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        }
    };
    uint32_t queueInfoCount = (presentQueueIndex != graphicsQueueIndex) ? 2 : 1;

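    // When extension features were queried, chain the VkPhysicalDeviceFeatures2 through pNext and
    // leave pEnabledFeatures null; otherwise pass the plain VkPhysicalDeviceFeatures.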
    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,        // sType
        pointerToFeatures,                           // pNext
        0,                                           // VkDeviceCreateFlags
        queueInfoCount,                              // queueCreateInfoCount
        queueInfo,                                   // pQueueCreateInfos
        (uint32_t) deviceLayerNames.count(),         // layerCount
        deviceLayerNames.begin(),                    // ppEnabledLayerNames
        (uint32_t) deviceExtensionNames.count(),     // extensionCount
        deviceExtensionNames.begin(),                // ppEnabledExtensionNames
        pointerToFeatures ? nullptr : deviceFeatures // ppEnabledFeatures
    };

    err = grVkCreateDevice(physDev, &deviceInfo, nullptr, &device);
    if (err) {
        SkDebugf("CreateDevice failed: %d\n", err);
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    VkQueue queue;
    grVkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);

    ctx->fInstance = inst;
    ctx->fPhysicalDevice = physDev;
    ctx->fDevice = device;
    ctx->fQueue = queue;
    ctx->fGraphicsQueueIndex = graphicsQueueIndex;
    ctx->fMaxAPIVersion = apiVersion;
    ctx->fVkExtensions = extensions;
    ctx->fDeviceFeatures2 = features;
    ctx->fGetProc = getProc;
    ctx->fOwnsInstanceAndDevice = false;

    return true;
}

void FreeVulkanFeaturesStructs(const VkPhysicalDeviceFeatures2* features) {
    // All Vulkan structs that could be part of the features chain will start with the
    // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
    // so we can get access to the pNext for the next struct.
    struct CommonVulkanHeader {
        VkStructureType sType;
        void* pNext;
    };

    void* pNext = features->pNext;
    while (pNext) {
        void* current = pNext;
        pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
        sk_free(current);
    }
}

}  // namespace sk_gpu_test

#endif