/*
 * Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Courtney Goeltzenleuchter <courtney (at) LunarG.com>
 * Author: Tony Barbour <tony (at) LunarG.com>
 */

#include "vktestbinding.h"
#include <assert.h>
#include <iostream>
#include <stdarg.h>
#include <string.h> // memset(), memcmp()

namespace {

#define NON_DISPATCHABLE_HANDLE_INIT(create_func, dev, ...)                                                                        \
    do {                                                                                                                           \
        handle_type handle;                                                                                                        \
        if (EXPECT(create_func(dev.handle(), __VA_ARGS__, NULL, &handle) == VK_SUCCESS))                                           \
            NonDispHandle::init(dev.handle(), handle);                                                                             \
    } while (0)
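
// Illustrative expansion (not part of the build): for a call such as
//   NON_DISPATCHABLE_HANDLE_INIT(vkCreateFence, dev, &info);
// the macro produces roughly
//   VkFence handle;
//   if (EXPECT(vkCreateFence(dev.handle(), &info, NULL, &handle) == VK_SUCCESS))
//       NonDispHandle::init(dev.handle(), handle);
// where handle_type is the wrapper class's handle typedef (VkFence here).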

#define NON_DISPATCHABLE_HANDLE_DTOR(cls, destroy_func)                                                                            \
    cls::~cls() {                                                                                                                  \
        if (initialized())                                                                                                         \
            destroy_func(device(), handle(), NULL);                                                                                \
    }

#define STRINGIFY(x) #x
#define EXPECT(expr) ((expr) ? true : expect_failure(STRINGIFY(expr), __FILE__, __LINE__, __FUNCTION__))

vk_testing::ErrorCallback error_callback;

bool expect_failure(const char *expr, const char *file, unsigned int line, const char *function) {
    if (error_callback) {
        error_callback(expr, file, line, function);
    } else {
        std::cerr << file << ":" << line << ": " << function << ": Expectation `" << expr << "' failed.\n";
    }

    return false;
}

template <class T, class S> std::vector<T> make_handles(const std::vector<S> &v) {
    std::vector<T> handles;
    handles.reserve(v.size());
    for (typename std::vector<S>::const_iterator it = v.begin(); it != v.end(); it++)
        handles.push_back((*it)->handle());
    return handles;
}

VkMemoryAllocateInfo get_resource_alloc_info(const vk_testing::Device &dev, const VkMemoryRequirements &reqs,
                                             VkMemoryPropertyFlags mem_props) {
    VkMemoryAllocateInfo info = vk_testing::DeviceMemory::alloc_info(reqs.size, 0);
    dev.phy().set_memory_type(reqs.memoryTypeBits, &info, mem_props);

    return info;
}

} // namespace

namespace vk_testing {

void set_error_callback(ErrorCallback callback) { error_callback = callback; }
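
// A minimal sketch of routing expectation failures into a test framework
// (hypothetical call site; ADD_FAILURE_AT is GoogleTest's macro):
//
//   static void on_expect_failure(const char *expr, const char *file,
//                                 unsigned int line, const char *function) {
//       ADD_FAILURE_AT(file, line) << function << ": " << expr;
//   }
//   ...
//   vk_testing::set_error_callback(on_expect_failure);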

VkPhysicalDeviceProperties PhysicalDevice::properties() const {
    VkPhysicalDeviceProperties info;

    vkGetPhysicalDeviceProperties(handle(), &info);

    return info;
}

std::vector<VkQueueFamilyProperties> PhysicalDevice::queue_properties() const {
    std::vector<VkQueueFamilyProperties> info;
    uint32_t count;

    // Call once with NULL data to receive count
    vkGetPhysicalDeviceQueueFamilyProperties(handle(), &count, NULL);
    info.resize(count);
    vkGetPhysicalDeviceQueueFamilyProperties(handle(), &count, info.data());

    return info;
}

VkPhysicalDeviceMemoryProperties PhysicalDevice::memory_properties() const {
    VkPhysicalDeviceMemoryProperties info;

    vkGetPhysicalDeviceMemoryProperties(handle(), &info);

    return info;
}

VkPhysicalDeviceFeatures PhysicalDevice::features() const {
    VkPhysicalDeviceFeatures features;
    vkGetPhysicalDeviceFeatures(handle(), &features);
    return features;
}

/*
 * Return the list of global layers available
 */
std::vector<VkLayerProperties> GetGlobalLayers() {
    VkResult err;
    std::vector<VkLayerProperties> layers;
    uint32_t layer_count;

    do {
        layer_count = 0;
        err = vkEnumerateInstanceLayerProperties(&layer_count, NULL);

        if (err == VK_SUCCESS) {
            // resize() (not reserve()) so that data() points at valid elements
            layers.resize(layer_count);
            err = vkEnumerateInstanceLayerProperties(&layer_count, layers.data());
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return layers;
}

/*
 * Return the list of global extensions provided by the ICD / loader
 */
std::vector<VkExtensionProperties> GetGlobalExtensions() { return GetGlobalExtensions(NULL); }

/*
 * Return the list of global extensions provided by the specified layer.
 * If pLayerName is NULL, returns the extensions implemented by the loader / ICDs.
 */
std::vector<VkExtensionProperties> GetGlobalExtensions(const char *pLayerName) {
    std::vector<VkExtensionProperties> exts;
    uint32_t ext_count;
    VkResult err;

    do {
        ext_count = 0;
        err = vkEnumerateInstanceExtensionProperties(pLayerName, &ext_count, NULL);

        if (err == VK_SUCCESS) {
            exts.resize(ext_count);
            err = vkEnumerateInstanceExtensionProperties(pLayerName, &ext_count, exts.data());
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return exts;
}

/*
 * Return the list of PhysicalDevice extensions provided by the ICD / loader
 */
std::vector<VkExtensionProperties> PhysicalDevice::extensions() const { return extensions(NULL); }

/*
 * Return the list of PhysicalDevice extensions provided by the specified layer.
 * If pLayerName is NULL, returns the extensions implemented by the ICD / loader.
 */
std::vector<VkExtensionProperties> PhysicalDevice::extensions(const char *pLayerName) const {
    std::vector<VkExtensionProperties> exts;
    VkResult err;

    do {
        uint32_t extCount = 0;
        err = vkEnumerateDeviceExtensionProperties(handle(), pLayerName, &extCount, NULL);

        if (err == VK_SUCCESS) {
            exts.resize(extCount);
            err = vkEnumerateDeviceExtensionProperties(handle(), pLayerName, &extCount, exts.data());
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return exts;
}

bool PhysicalDevice::set_memory_type(const uint32_t type_bits, VkMemoryAllocateInfo *info, const VkFlags properties,
                                     const VkFlags forbid) const {
    uint32_t type_mask = type_bits;
    // Search memory types for the first allowed index with the requested properties
    for (uint32_t i = 0; i < memory_properties_.memoryTypeCount; i++) {
        if ((type_mask & 1) == 1) {
            // Type is allowed by the mask; does it match the requested properties?
            if ((memory_properties_.memoryTypes[i].propertyFlags & properties) == properties &&
                (memory_properties_.memoryTypes[i].propertyFlags & forbid) == 0) {
                info->memoryTypeIndex = i;
                return true;
            }
        }
        type_mask >>= 1;
    }
    // No memory type matched; report failure
    return false;
}
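
// Usage sketch (assuming a buffer's requirements were already queried):
//   VkMemoryRequirements reqs = buf.memory_requirements();
//   VkMemoryAllocateInfo info = DeviceMemory::alloc_info(reqs.size, 0);
//   // Prefer a host-visible, host-coherent type from the allowed mask.
//   if (!dev.phy().set_memory_type(reqs.memoryTypeBits, &info,
//                                  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
//                                  VK_MEMORY_PROPERTY_HOST_COHERENT_BIT))
//       { /* no compatible memory type; fall back or fail the test */ }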

/*
 * Return the list of PhysicalDevice layers
 */
std::vector<VkLayerProperties> PhysicalDevice::layers() const {
    std::vector<VkLayerProperties> layer_props;
    VkResult err;

    do {
        uint32_t layer_count = 0;
        err = vkEnumerateDeviceLayerProperties(handle(), &layer_count, NULL);

        if (err == VK_SUCCESS) {
            // resize() (not reserve()) so that data() points at valid elements
            layer_props.resize(layer_count);
            err = vkEnumerateDeviceLayerProperties(handle(), &layer_count, layer_props.data());
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return layer_props;
}

Device::~Device() {
    if (!initialized())
        return;

    for (int i = 0; i < QUEUE_COUNT; i++) {
        for (std::vector<Queue *>::iterator it = queues_[i].begin(); it != queues_[i].end(); it++)
            delete *it;
        queues_[i].clear();
    }

    vkDestroyDevice(handle(), NULL);
}

void Device::init(std::vector<const char *> &extensions, VkPhysicalDeviceFeatures *features) {
    // request all queues
    const std::vector<VkQueueFamilyProperties> queue_props = phy_.queue_properties();
    std::vector<VkDeviceQueueCreateInfo> queue_info;
    queue_info.reserve(queue_props.size());

    std::vector<std::vector<float>> queue_priorities;

    for (uint32_t i = 0; i < (uint32_t)queue_props.size(); i++) {
        VkDeviceQueueCreateInfo qi = {};
        qi.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
        qi.pNext = NULL;
        qi.queueFamilyIndex = i;
        qi.queueCount = queue_props[i].queueCount;

        queue_priorities.emplace_back(qi.queueCount, 0.0f);

        // The inner vector's allocation is stable even if the outer vector
        // reallocates (moves transfer the buffer), so this pointer stays valid.
        qi.pQueuePriorities = queue_priorities[i].data();
        if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphics_queue_node_index_ = i;
        }
        queue_info.push_back(qi);
    }

    VkDeviceCreateInfo dev_info = {};
    dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    dev_info.pNext = NULL;
    dev_info.queueCreateInfoCount = static_cast<uint32_t>(queue_info.size());
    dev_info.pQueueCreateInfos = queue_info.data();
    dev_info.enabledLayerCount = 0;
    dev_info.ppEnabledLayerNames = NULL;
    dev_info.enabledExtensionCount = static_cast<uint32_t>(extensions.size());
    dev_info.ppEnabledExtensionNames = extensions.data();

    VkPhysicalDeviceFeatures all_features;
    if (features) {
        dev_info.pEnabledFeatures = features;
    } else {
        // request all supported features enabled
        all_features = phy().features();
        dev_info.pEnabledFeatures = &all_features;
    }

    init(dev_info);
}
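
// Typical construction in a test (sketch; assumes the Device constructor
// takes the VkPhysicalDevice that initializes phy_):
//   vk_testing::Device dev(gpu_handle);
//   std::vector<const char *> extensions; // e.g. push VK_KHR_SWAPCHAIN_EXTENSION_NAME
//   dev.init(extensions, NULL);           // NULL features -> enable all supported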

void Device::init(const VkDeviceCreateInfo &info) {
    VkDevice dev;

    if (EXPECT(vkCreateDevice(phy_.handle(), &info, NULL, &dev) == VK_SUCCESS))
        Handle::init(dev);

    init_queues();
    init_formats();
}

void Device::init_queues() {
    uint32_t queue_node_count;

    // Call with NULL data to get count
    vkGetPhysicalDeviceQueueFamilyProperties(phy_.handle(), &queue_node_count, NULL);
    EXPECT(queue_node_count >= 1);

    std::vector<VkQueueFamilyProperties> queue_props(queue_node_count);
    vkGetPhysicalDeviceQueueFamilyProperties(phy_.handle(), &queue_node_count, queue_props.data());

    for (uint32_t i = 0; i < queue_node_count; i++) {
        VkQueue queue;

        for (uint32_t j = 0; j < queue_props[i].queueCount; j++) {
            // TODO: Need to add support for separate MEMMGR and work queues,
            // including synchronization
            vkGetDeviceQueue(handle(), i, j, &queue);

            if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
                queues_[GRAPHICS].push_back(new Queue(queue, i));
            }

            if (queue_props[i].queueFlags & VK_QUEUE_COMPUTE_BIT) {
                queues_[COMPUTE].push_back(new Queue(queue, i));
            }

            if (queue_props[i].queueFlags & VK_QUEUE_TRANSFER_BIT) {
                queues_[DMA].push_back(new Queue(queue, i));
            }
        }
    }

    EXPECT(!queues_[GRAPHICS].empty() || !queues_[COMPUTE].empty());
}

void Device::init_formats() {
    for (int f = VK_FORMAT_BEGIN_RANGE; f <= VK_FORMAT_END_RANGE; f++) {
        const VkFormat fmt = static_cast<VkFormat>(f);
        const VkFormatProperties props = format_properties(fmt);

        if (props.linearTilingFeatures) {
            const Format tmp = {fmt, VK_IMAGE_TILING_LINEAR, props.linearTilingFeatures};
            formats_.push_back(tmp);
        }

        if (props.optimalTilingFeatures) {
            const Format tmp = {fmt, VK_IMAGE_TILING_OPTIMAL, props.optimalTilingFeatures};
            formats_.push_back(tmp);
        }
    }

    EXPECT(!formats_.empty());
}

VkFormatProperties Device::format_properties(VkFormat format) {
    VkFormatProperties data;
    vkGetPhysicalDeviceFormatProperties(phy().handle(), format, &data);

    return data;
}

void Device::wait() { EXPECT(vkDeviceWaitIdle(handle()) == VK_SUCCESS); }

VkResult Device::wait(const std::vector<const Fence *> &fences, bool wait_all, uint64_t timeout) {
    const std::vector<VkFence> fence_handles = make_handles<VkFence>(fences);
    VkResult err =
        vkWaitForFences(handle(), static_cast<uint32_t>(fence_handles.size()), fence_handles.data(), wait_all, timeout);
    EXPECT(err == VK_SUCCESS || err == VK_TIMEOUT);

    return err;
}

void Device::update_descriptor_sets(const std::vector<VkWriteDescriptorSet> &writes,
                                    const std::vector<VkCopyDescriptorSet> &copies) {
    vkUpdateDescriptorSets(handle(), static_cast<uint32_t>(writes.size()), writes.data(),
                           static_cast<uint32_t>(copies.size()), copies.data());
}

void Queue::submit(const std::vector<const CommandBuffer *> &cmds, Fence &fence) {
    const std::vector<VkCommandBuffer> cmd_handles = make_handles<VkCommandBuffer>(cmds);
    VkSubmitInfo submit_info;
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.pNext = NULL;
    submit_info.waitSemaphoreCount = 0;
    submit_info.pWaitSemaphores = NULL;
    submit_info.pWaitDstStageMask = NULL;
    submit_info.commandBufferCount = (uint32_t)cmd_handles.size();
    submit_info.pCommandBuffers = cmd_handles.data();
    submit_info.signalSemaphoreCount = 0;
    submit_info.pSignalSemaphores = NULL;

    EXPECT(vkQueueSubmit(handle(), 1, &submit_info, fence.handle()) == VK_SUCCESS);
}

void Queue::submit(const CommandBuffer &cmd, Fence &fence) { submit(std::vector<const CommandBuffer *>(1, &cmd), fence); }

void Queue::submit(const CommandBuffer &cmd) {
    Fence fence;
    submit(cmd, fence);
}

void Queue::wait() { EXPECT(vkQueueWaitIdle(handle()) == VK_SUCCESS); }
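
// Sketch of a typical submit-and-wait sequence using the wrappers above
// (fence_info construction elided):
//   vk_testing::Fence fence;
//   fence.init(dev, fence_info);
//   queue.submit(cmd_buffer, fence);
//   dev.wait(std::vector<const vk_testing::Fence *>(1, &fence), true, UINT64_MAX);
//   queue.wait(); // or simply drain the queue instead of fencing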

DeviceMemory::~DeviceMemory() {
    if (initialized())
        vkFreeMemory(device(), handle(), NULL);
}

void DeviceMemory::init(const Device &dev, const VkMemoryAllocateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkAllocateMemory, dev, &info);
}

const void *DeviceMemory::map(VkFlags flags) const {
    void *data;
    if (!EXPECT(vkMapMemory(device(), handle(), 0, VK_WHOLE_SIZE, flags, &data) == VK_SUCCESS))
        data = NULL;

    return data;
}

void *DeviceMemory::map(VkFlags flags) {
    void *data;
    if (!EXPECT(vkMapMemory(device(), handle(), 0, VK_WHOLE_SIZE, flags, &data) == VK_SUCCESS))
        data = NULL;

    return data;
}

void DeviceMemory::unmap() const { vkUnmapMemory(device(), handle()); }
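
// Usage sketch for filling host-visible memory (assumes the allocation used
// a VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT type and alloc_size is its size):
//   void *ptr = mem.map(0);
//   if (ptr) {
//       memset(ptr, 0, (size_t)alloc_size);
//       mem.unmap();
//   }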

NON_DISPATCHABLE_HANDLE_DTOR(Fence, vkDestroyFence)

void Fence::init(const Device &dev, const VkFenceCreateInfo &info) { NON_DISPATCHABLE_HANDLE_INIT(vkCreateFence, dev, &info); }

NON_DISPATCHABLE_HANDLE_DTOR(Semaphore, vkDestroySemaphore)

void Semaphore::init(const Device &dev, const VkSemaphoreCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateSemaphore, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(Event, vkDestroyEvent)

void Event::init(const Device &dev, const VkEventCreateInfo &info) { NON_DISPATCHABLE_HANDLE_INIT(vkCreateEvent, dev, &info); }

void Event::set() { EXPECT(vkSetEvent(device(), handle()) == VK_SUCCESS); }

void Event::reset() { EXPECT(vkResetEvent(device(), handle()) == VK_SUCCESS); }

NON_DISPATCHABLE_HANDLE_DTOR(QueryPool, vkDestroyQueryPool)

void QueryPool::init(const Device &dev, const VkQueryPoolCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateQueryPool, dev, &info);
}

VkResult QueryPool::results(uint32_t first, uint32_t count, size_t size, void *data, size_t stride) {
    VkResult err = vkGetQueryPoolResults(device(), handle(), first, count, size, data, stride, 0);
    EXPECT(err == VK_SUCCESS || err == VK_NOT_READY);

    return err;
}
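
// Usage sketch: read back `count` 32-bit query results without waiting
// (VK_NOT_READY is tolerated by the wrapper above):
//   std::vector<uint32_t> results(count);
//   if (query_pool.results(0, count, results.size() * sizeof(uint32_t),
//                          results.data(), sizeof(uint32_t)) == VK_SUCCESS)
//       { /* results are valid */ }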

NON_DISPATCHABLE_HANDLE_DTOR(Buffer, vkDestroyBuffer)

void Buffer::init(const Device &dev, const VkBufferCreateInfo &info, VkMemoryPropertyFlags mem_props) {
    init_no_mem(dev, info);

    // Allocate and bind backing memory only if creation succeeded (matches Image::init)
    if (initialized()) {
        internal_mem_.init(dev, get_resource_alloc_info(dev, memory_requirements(), mem_props));
        bind_memory(internal_mem_, 0);
    }
}

void Buffer::init_no_mem(const Device &dev, const VkBufferCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateBuffer, dev, &info);
    create_info_ = info;
}

VkMemoryRequirements Buffer::memory_requirements() const {
    VkMemoryRequirements reqs;

    vkGetBufferMemoryRequirements(device(), handle(), &reqs);

    return reqs;
}

void Buffer::bind_memory(const DeviceMemory &mem, VkDeviceSize mem_offset) {
    EXPECT(vkBindBufferMemory(device(), handle(), mem.handle(), mem_offset) == VK_SUCCESS);
}

NON_DISPATCHABLE_HANDLE_DTOR(BufferView, vkDestroyBufferView)

void BufferView::init(const Device &dev, const VkBufferViewCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateBufferView, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(Image, vkDestroyImage)

void Image::init(const Device &dev, const VkImageCreateInfo &info, VkMemoryPropertyFlags mem_props) {
    init_no_mem(dev, info);

    if (initialized()) {
        internal_mem_.init(dev, get_resource_alloc_info(dev, memory_requirements(), mem_props));
        bind_memory(internal_mem_, 0);
    }
}

void Image::init_no_mem(const Device &dev, const VkImageCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateImage, dev, &info);
    if (initialized()) {
        init_info(dev, info);
    }
}

void Image::init_info(const Device &dev, const VkImageCreateInfo &info) {
    create_info_ = info;

    // Record the format features matching this image's format and tiling
    for (std::vector<Device::Format>::const_iterator it = dev.formats().begin(); it != dev.formats().end(); it++) {
        if (it->format == create_info_.format && it->tiling == create_info_.tiling) {
            format_features_ = it->features;
            break;
        }
    }
}

VkMemoryRequirements Image::memory_requirements() const {
    VkMemoryRequirements reqs;

    vkGetImageMemoryRequirements(device(), handle(), &reqs);

    return reqs;
}

void Image::bind_memory(const DeviceMemory &mem, VkDeviceSize mem_offset) {
    EXPECT(vkBindImageMemory(device(), handle(), mem.handle(), mem_offset) == VK_SUCCESS);
}

VkSubresourceLayout Image::subresource_layout(const VkImageSubresource &subres) const {
    VkSubresourceLayout data;
    vkGetImageSubresourceLayout(device(), handle(), &subres, &data);

    return data;
}

VkSubresourceLayout Image::subresource_layout(const VkImageSubresourceLayers &subrescopy) const {
    VkSubresourceLayout data;
    VkImageSubresource subres = subresource(subrescopy.aspectMask, subrescopy.mipLevel, subrescopy.baseArrayLayer);
    vkGetImageSubresourceLayout(device(), handle(), &subres, &data);

    return data;
}

bool Image::transparent() const {
    return (create_info_.tiling == VK_IMAGE_TILING_LINEAR && create_info_.samples == VK_SAMPLE_COUNT_1_BIT &&
            !(create_info_.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)));
}

NON_DISPATCHABLE_HANDLE_DTOR(ImageView, vkDestroyImageView)

void ImageView::init(const Device &dev, const VkImageViewCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateImageView, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(ShaderModule, vkDestroyShaderModule)

void ShaderModule::init(const Device &dev, const VkShaderModuleCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateShaderModule, dev, &info);
}

VkResult ShaderModule::init_try(const Device &dev, const VkShaderModuleCreateInfo &info) {
    VkShaderModule mod;

    VkResult err = vkCreateShaderModule(dev.handle(), &info, NULL, &mod);
    if (err == VK_SUCCESS)
        NonDispHandle::init(dev.handle(), mod);

    return err;
}

NON_DISPATCHABLE_HANDLE_DTOR(Pipeline, vkDestroyPipeline)

void Pipeline::init(const Device &dev, const VkGraphicsPipelineCreateInfo &info) {
    VkPipelineCache cache;
    VkPipelineCacheCreateInfo ci = {};
    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    VkResult err = vkCreatePipelineCache(dev.handle(), &ci, NULL, &cache);
    if (err == VK_SUCCESS) {
        NON_DISPATCHABLE_HANDLE_INIT(vkCreateGraphicsPipelines, dev, cache, 1, &info);
        vkDestroyPipelineCache(dev.handle(), cache, NULL);
    }
}

VkResult Pipeline::init_try(const Device &dev, const VkGraphicsPipelineCreateInfo &info) {
    VkPipeline pipe;
    VkPipelineCache cache;
    VkPipelineCacheCreateInfo ci = {};
    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    VkResult err = vkCreatePipelineCache(dev.handle(), &ci, NULL, &cache);
    EXPECT(err == VK_SUCCESS);
    if (err == VK_SUCCESS) {
        err = vkCreateGraphicsPipelines(dev.handle(), cache, 1, &info, NULL, &pipe);
        if (err == VK_SUCCESS) {
            NonDispHandle::init(dev.handle(), pipe);
        }
        vkDestroyPipelineCache(dev.handle(), cache, NULL);
    }

    return err;
}

void Pipeline::init(const Device &dev, const VkComputePipelineCreateInfo &info) {
    VkPipelineCache cache;
    VkPipelineCacheCreateInfo ci = {};
    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    VkResult err = vkCreatePipelineCache(dev.handle(), &ci, NULL, &cache);
    if (err == VK_SUCCESS) {
        NON_DISPATCHABLE_HANDLE_INIT(vkCreateComputePipelines, dev, cache, 1, &info);
        vkDestroyPipelineCache(dev.handle(), cache, NULL);
    }
}

NON_DISPATCHABLE_HANDLE_DTOR(PipelineLayout, vkDestroyPipelineLayout)

void PipelineLayout::init(const Device &dev, VkPipelineLayoutCreateInfo &info,
                          const std::vector<const DescriptorSetLayout *> &layouts) {
    const std::vector<VkDescriptorSetLayout> layout_handles = make_handles<VkDescriptorSetLayout>(layouts);
    info.pSetLayouts = layout_handles.data();

    NON_DISPATCHABLE_HANDLE_INIT(vkCreatePipelineLayout, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(Sampler, vkDestroySampler)

void Sampler::init(const Device &dev, const VkSamplerCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateSampler, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(DescriptorSetLayout, vkDestroyDescriptorSetLayout)

void DescriptorSetLayout::init(const Device &dev, const VkDescriptorSetLayoutCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateDescriptorSetLayout, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(DescriptorPool, vkDestroyDescriptorPool)

void DescriptorPool::init(const Device &dev, const VkDescriptorPoolCreateInfo &info) {
    // Track whether the pool allows sets to be freed individually
    setDynamicUsage(info.flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT);
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateDescriptorPool, dev, &info);
}

void DescriptorPool::reset() { EXPECT(vkResetDescriptorPool(device(), handle(), 0) == VK_SUCCESS); }

std::vector<DescriptorSet *> DescriptorPool::alloc_sets(const Device &dev,
                                                        const std::vector<const DescriptorSetLayout *> &layouts) {
    const std::vector<VkDescriptorSetLayout> layout_handles = make_handles<VkDescriptorSetLayout>(layouts);

    std::vector<VkDescriptorSet> set_handles;
    set_handles.resize(layout_handles.size());

    VkDescriptorSetAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
    alloc_info.descriptorSetCount = static_cast<uint32_t>(layout_handles.size());
    alloc_info.descriptorPool = handle();
    alloc_info.pSetLayouts = layout_handles.data();
    VkResult err = vkAllocateDescriptorSets(device(), &alloc_info, set_handles.data());
    EXPECT(err == VK_SUCCESS);

    std::vector<DescriptorSet *> sets;
    for (std::vector<VkDescriptorSet>::const_iterator it = set_handles.begin(); it != set_handles.end(); it++) {
        // do descriptor sets need memories bound?
        DescriptorSet *descriptorSet = new DescriptorSet(dev, this, *it);
        sets.push_back(descriptorSet);
    }
    return sets;
}
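
// Usage sketch (the caller owns the returned DescriptorSet objects):
//   std::vector<vk_testing::DescriptorSet *> sets =
//       pool.alloc_sets(dev, std::vector<const vk_testing::DescriptorSetLayout *>(2, &layout));
//   ...
//   for (size_t i = 0; i < sets.size(); i++)
//       delete sets[i]; // frees from the pool only if it allows free-descriptor-set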

std::vector<DescriptorSet *> DescriptorPool::alloc_sets(const Device &dev, const DescriptorSetLayout &layout, uint32_t count) {
    return alloc_sets(dev, std::vector<const DescriptorSetLayout *>(count, &layout));
}

DescriptorSet *DescriptorPool::alloc_sets(const Device &dev, const DescriptorSetLayout &layout) {
    std::vector<DescriptorSet *> set = alloc_sets(dev, layout, 1);
    return (set.empty()) ? NULL : set[0];
}

DescriptorSet::~DescriptorSet() {
    if (initialized()) {
        // Only call vkFreeDescriptorSets() on sets allocated from a pool created
        // with VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT
        if (containing_pool_->getDynamicUsage()) {
            VkDescriptorSet sets[1] = {handle()};
            EXPECT(vkFreeDescriptorSets(device(), containing_pool_->GetObj(), 1, sets) == VK_SUCCESS);
        }
    }
}

NON_DISPATCHABLE_HANDLE_DTOR(CommandPool, vkDestroyCommandPool)

void CommandPool::init(const Device &dev, const VkCommandPoolCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateCommandPool, dev, &info);
}

CommandBuffer::~CommandBuffer() {
    if (initialized()) {
        VkCommandBuffer cmds[] = {handle()};
        vkFreeCommandBuffers(dev_handle_, cmd_pool_, 1, cmds);
    }
}

void CommandBuffer::init(const Device &dev, const VkCommandBufferAllocateInfo &info) {
    VkCommandBuffer cmd;

    // Make sure commandPool is set
    assert(info.commandPool);

    if (EXPECT(vkAllocateCommandBuffers(dev.handle(), &info, &cmd) == VK_SUCCESS)) {
        Handle::init(cmd);
        dev_handle_ = dev.handle();
        cmd_pool_ = info.commandPool;
    }
}

void CommandBuffer::begin(const VkCommandBufferBeginInfo *info) { EXPECT(vkBeginCommandBuffer(handle(), info) == VK_SUCCESS); }

void CommandBuffer::begin() {
    VkCommandBufferBeginInfo info = {};
    VkCommandBufferInheritanceInfo hinfo = {};
    info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.pInheritanceInfo = &hinfo;
    hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
    hinfo.pNext = NULL;
    hinfo.renderPass = VK_NULL_HANDLE;
    hinfo.subpass = 0;
    hinfo.framebuffer = VK_NULL_HANDLE;
    hinfo.occlusionQueryEnable = VK_FALSE;
    hinfo.queryFlags = 0;
    hinfo.pipelineStatistics = 0;

    begin(&info);
}
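
// Typical recording sequence (sketch; assumes CommandBuffer is
// default-constructible and alloc_info.commandPool has been set):
//   vk_testing::CommandBuffer cmd;
//   cmd.init(dev, alloc_info);
//   cmd.begin();               // one-time-submit defaults above
//   /* ... record commands via cmd.handle() ... */
//   cmd.end();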

void CommandBuffer::end() { EXPECT(vkEndCommandBuffer(handle()) == VK_SUCCESS); }

void CommandBuffer::reset(VkCommandBufferResetFlags flags) { EXPECT(vkResetCommandBuffer(handle(), flags) == VK_SUCCESS); }

} // namespace vk_testing