/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cody@lunarg.com>
 * Author: Mike Stroyan <mike@LunarG.com>
 */

#ifndef THREADING_H
#define THREADING_H
#include <condition_variable>
#include <mutex>
#include <unordered_map>
#include <vector>
#include "vk_layer_config.h"
#include "vk_layer_logging.h"

#if defined(__LP64__) || defined(_WIN64) || defined(__x86_64__) || defined(_M_X64) || defined(__ia64) || defined(_M_IA64) ||       \
    defined(__aarch64__) || defined(__powerpc64__)
// If pointers are 64-bit, then there can be separate counters for each
// NONDISPATCHABLE_HANDLE type.  Otherwise they are all typedef'd to uint64_t.
#define DISTINCT_NONDISPATCHABLE_HANDLES
#endif
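// For example, on a 32-bit build VkBuffer and VkImage are both bare uint64_t
// typedefs, so a single counter<uint64_t> (see layer_data below) has to track
// all non-dispatchable handles; on a 64-bit build each type gets its own counter.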

// Threading checker error codes
enum THREADING_CHECKER_ERROR {
    THREADING_CHECKER_NONE,                // Used for INFO & other non-error messages
    THREADING_CHECKER_MULTIPLE_THREADS,    // Object used simultaneously by multiple threads
    THREADING_CHECKER_SINGLE_THREAD_REUSE, // Object used simultaneously by recursion in single thread
};

struct object_use_data {
    loader_platform_thread_id thread; // thread currently using the object
    int reader_count;                 // number of simultaneous readers
    int writer_count;                 // number of simultaneous writers
};

struct layer_data;

// One lock and condition variable protect every counter's use map; start*
// waits on the condition when it elects to block, and finish* broadcasts
// after erasing an entry.
static std::mutex global_lock;
static std::condition_variable global_condition;

template <typename T> class counter {
  public:
    const char *typeName;
    VkDebugReportObjectTypeEXT objectType;
    std::unordered_map<T, object_use_data> uses;
    void startWrite(debug_report_data *report_data, T object) {
        bool skipCall = false;
        loader_platform_thread_id tid = loader_platform_get_thread_id();
        std::unique_lock<std::mutex> lock(global_lock);
        if (uses.find(object) == uses.end()) {
            // There is no current use of the object.  Record writer thread.
            struct object_use_data *use_data = &uses[object];
            use_data->reader_count = 0;
            use_data->writer_count = 1;
            use_data->thread = tid;
        } else {
            struct object_use_data *use_data = &uses[object];
            if (use_data->thread != tid) {
                // This writer collided with readers and/or a writer on another thread.
                // (The handling is identical whether the prior use was reads or writes.)
                skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object),
                                    /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
                                    "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld",
                                    typeName, use_data->thread, tid);
                if (skipCall) {
                    // Wait for thread-safe access to object instead of skipping call.
                    while (uses.find(object) != uses.end()) {
                        global_condition.wait(lock);
                    }
                    // There is now no current use of the object.  Record writer thread.
                    // The old map entry is gone, so take a fresh pointer.
                    struct object_use_data *new_use_data = &uses[object];
                    new_use_data->thread = tid;
                    new_use_data->reader_count = 0;
                    new_use_data->writer_count = 1;
                } else {
                    // Continue with an unsafe use of the object.
                    use_data->thread = tid;
                    use_data->writer_count += 1;
                }
            } else {
                // This is either safe multiple use in one call, or recursive use.
                // There is no way to make recursion safe.  Just forge ahead.
                use_data->writer_count += 1;
            }
        }
    }

    void finishWrite(T object) {
        // Object is no longer in use
        std::unique_lock<std::mutex> lock(global_lock);
        uses[object].writer_count -= 1;
        if ((uses[object].reader_count == 0) && (uses[object].writer_count == 0)) {
            uses.erase(object);
        }
        // Notify any waiting threads that this object may be safe to use
        lock.unlock();
        global_condition.notify_all();
    }

    void startRead(debug_report_data *report_data, T object) {
        bool skipCall = false;
        loader_platform_thread_id tid = loader_platform_get_thread_id();
        std::unique_lock<std::mutex> lock(global_lock);
        if (uses.find(object) == uses.end()) {
            // There is no current use of the object.  Record reader count.
            struct object_use_data *use_data = &uses[object];
            use_data->reader_count = 1;
            use_data->writer_count = 0;
            use_data->thread = tid;
        } else if (uses[object].writer_count > 0 && uses[object].thread != tid) {
            // There is a writer of the object.
            skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object),
                                /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
                                "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld", typeName,
                                uses[object].thread, tid);
            if (skipCall) {
                // Wait for thread-safe access to object instead of skipping call.
                while (uses.find(object) != uses.end()) {
                    global_condition.wait(lock);
                }
                // There is no current use of the object.  Record reader count.
                struct object_use_data *use_data = &uses[object];
                use_data->reader_count = 1;
                use_data->writer_count = 0;
                use_data->thread = tid;
            } else {
                uses[object].reader_count += 1;
            }
        } else {
            // There are other readers of the object.  Increase reader count.
            uses[object].reader_count += 1;
        }
    }
    void finishRead(T object) {
        std::unique_lock<std::mutex> lock(global_lock);
        uses[object].reader_count -= 1;
        if ((uses[object].reader_count == 0) && (uses[object].writer_count == 0)) {
            uses.erase(object);
        }
        // Notify any waiting threads that this object may be safe to use
        lock.unlock();
        global_condition.notify_all();
    }
    counter(const char *name = "", VkDebugReportObjectTypeEXT type = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT) {
        typeName = name;
        objectType = type;
    }
};
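
// Hypothetical usage sketch (comment only, not compiled): an intercepted entry
// point brackets the downstream dispatch between startWrite and finishWrite so
// that concurrent use of the same handle on another thread is reported, or
// waited out when the report callback asks to skip the call:
//
//     static counter<VkQueue> c_queue("VkQueue", VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT);
//
//     VkResult InterceptQueueWaitIdle(debug_report_data *report_data, VkLayerDispatchTable *table, VkQueue queue) {
//         c_queue.startWrite(report_data, queue);  // flags/waits if another thread is using the queue
//         VkResult result = table->QueueWaitIdle(queue);
//         c_queue.finishWrite(queue);              // wakes any thread blocked in startRead/startWrite
//         return result;
//     }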

struct layer_data {
    VkInstance instance;

    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;
    // The following are for keeping track of the temporary callbacks that can
    // be used in vkCreateInstance and vkDestroyInstance:
    uint32_t num_tmp_callbacks;
    VkDebugReportCallbackCreateInfoEXT *tmp_dbg_create_infos;
    VkDebugReportCallbackEXT *tmp_callbacks;
    counter<VkCommandBuffer> c_VkCommandBuffer;
    counter<VkDevice> c_VkDevice;
    counter<VkInstance> c_VkInstance;
    counter<VkQueue> c_VkQueue;
#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
    counter<VkBuffer> c_VkBuffer;
    counter<VkBufferView> c_VkBufferView;
    counter<VkCommandPool> c_VkCommandPool;
    counter<VkDescriptorPool> c_VkDescriptorPool;
    counter<VkDescriptorSet> c_VkDescriptorSet;
    counter<VkDescriptorSetLayout> c_VkDescriptorSetLayout;
    counter<VkDeviceMemory> c_VkDeviceMemory;
    counter<VkEvent> c_VkEvent;
    counter<VkFence> c_VkFence;
    counter<VkFramebuffer> c_VkFramebuffer;
    counter<VkImage> c_VkImage;
    counter<VkImageView> c_VkImageView;
    counter<VkPipeline> c_VkPipeline;
    counter<VkPipelineCache> c_VkPipelineCache;
    counter<VkPipelineLayout> c_VkPipelineLayout;
    counter<VkQueryPool> c_VkQueryPool;
    counter<VkRenderPass> c_VkRenderPass;
    counter<VkSampler> c_VkSampler;
    counter<VkSemaphore> c_VkSemaphore;
    counter<VkShaderModule> c_VkShaderModule;
    counter<VkDebugReportCallbackEXT> c_VkDebugReportCallbackEXT;
#else  // DISTINCT_NONDISPATCHABLE_HANDLES
    counter<uint64_t> c_uint64_t;
#endif // DISTINCT_NONDISPATCHABLE_HANDLES
    layer_data()
        : report_data(nullptr), num_tmp_callbacks(0), tmp_dbg_create_infos(nullptr), tmp_callbacks(nullptr),
          c_VkCommandBuffer("VkCommandBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT),
          c_VkDevice("VkDevice", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT),
          c_VkInstance("VkInstance", VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT),
          c_VkQueue("VkQueue", VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT),
#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
          c_VkBuffer("VkBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT),
          c_VkBufferView("VkBufferView", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT),
          c_VkCommandPool("VkCommandPool", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT),
          c_VkDescriptorPool("VkDescriptorPool", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT),
          c_VkDescriptorSet("VkDescriptorSet", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT),
          c_VkDescriptorSetLayout("VkDescriptorSetLayout", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT),
          c_VkDeviceMemory("VkDeviceMemory", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT),
          c_VkEvent("VkEvent", VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT), c_VkFence("VkFence", VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT),
          c_VkFramebuffer("VkFramebuffer", VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT),
          c_VkImage("VkImage", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT),
          c_VkImageView("VkImageView", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT),
          c_VkPipeline("VkPipeline", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT),
          c_VkPipelineCache("VkPipelineCache", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT),
          c_VkPipelineLayout("VkPipelineLayout", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT),
          c_VkQueryPool("VkQueryPool", VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT),
          c_VkRenderPass("VkRenderPass", VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT),
          c_VkSampler("VkSampler", VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT),
          c_VkSemaphore("VkSemaphore", VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT),
          c_VkShaderModule("VkShaderModule", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT),
          c_VkDebugReportCallbackEXT("VkDebugReportCallbackEXT", VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT)
#else  // DISTINCT_NONDISPATCHABLE_HANDLES
          c_uint64_t("NON_DISPATCHABLE_HANDLE", VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT)
#endif // DISTINCT_NONDISPATCHABLE_HANDLES
    {}
};

#define WRAPPER(type)                                                                                                              \
    static void startWriteObject(struct layer_data *my_data, type object) {                                                        \
        my_data->c_##type.startWrite(my_data->report_data, object);                                                                \
    }                                                                                                                              \
    static void finishWriteObject(struct layer_data *my_data, type object) { my_data->c_##type.finishWrite(object); }              \
    static void startReadObject(struct layer_data *my_data, type object) {                                                         \
        my_data->c_##type.startRead(my_data->report_data, object);                                                                 \
    }                                                                                                                              \
    static void finishReadObject(struct layer_data *my_data, type object) { my_data->c_##type.finishRead(object); }

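// For example, WRAPPER(VkQueue) defines:
//
//     static void startWriteObject(struct layer_data *my_data, VkQueue object) {
//         my_data->c_VkQueue.startWrite(my_data->report_data, object);
//     }
//     static void finishWriteObject(struct layer_data *my_data, VkQueue object) { my_data->c_VkQueue.finishWrite(object); }
//     static void startReadObject(struct layer_data *my_data, VkQueue object) {
//         my_data->c_VkQueue.startRead(my_data->report_data, object);
//     }
//     static void finishReadObject(struct layer_data *my_data, VkQueue object) { my_data->c_VkQueue.finishRead(object); }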
WRAPPER(VkDevice)
WRAPPER(VkInstance)
WRAPPER(VkQueue)
#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
WRAPPER(VkBuffer)
WRAPPER(VkBufferView)
WRAPPER(VkCommandPool)
WRAPPER(VkDescriptorPool)
WRAPPER(VkDescriptorSet)
WRAPPER(VkDescriptorSetLayout)
WRAPPER(VkDeviceMemory)
WRAPPER(VkEvent)
WRAPPER(VkFence)
WRAPPER(VkFramebuffer)
WRAPPER(VkImage)
WRAPPER(VkImageView)
WRAPPER(VkPipeline)
WRAPPER(VkPipelineCache)
WRAPPER(VkPipelineLayout)
WRAPPER(VkQueryPool)
WRAPPER(VkRenderPass)
WRAPPER(VkSampler)
WRAPPER(VkSemaphore)
WRAPPER(VkShaderModule)
WRAPPER(VkDebugReportCallbackEXT)
#else  // DISTINCT_NONDISPATCHABLE_HANDLES
WRAPPER(uint64_t)
#endif // DISTINCT_NONDISPATCHABLE_HANDLES

static std::unordered_map<void *, layer_data *> layer_data_map;
static std::unordered_map<VkCommandBuffer, VkCommandPool> command_pool_map;
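// layer_data_map is keyed by each object's loader dispatch key; command_pool_map
// is assumed to be filled in by this layer's vkAllocateCommandBuffers /
// vkFreeCommandBuffers intercepts in the accompanying source file.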

// VkCommandBuffer needs an extra check for implicit use of its command pool
static void startWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = true) {
    if (lockPool) {
        std::unique_lock<std::mutex> lock(global_lock);
        VkCommandPool pool = command_pool_map[object];
        lock.unlock();
        startWriteObject(my_data, pool);
    }
    my_data->c_VkCommandBuffer.startWrite(my_data->report_data, object);
}
static void finishWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = true) {
    my_data->c_VkCommandBuffer.finishWrite(object);
    if (lockPool) {
        std::unique_lock<std::mutex> lock(global_lock);
        VkCommandPool pool = command_pool_map[object];
        lock.unlock();
        finishWriteObject(my_data, pool);
    }
}
static void startReadObject(struct layer_data *my_data, VkCommandBuffer object) {
    std::unique_lock<std::mutex> lock(global_lock);
    VkCommandPool pool = command_pool_map[object];
    lock.unlock();
    startReadObject(my_data, pool);
    my_data->c_VkCommandBuffer.startRead(my_data->report_data, object);
}
static void finishReadObject(struct layer_data *my_data, VkCommandBuffer object) {
    my_data->c_VkCommandBuffer.finishRead(object);
    std::unique_lock<std::mutex> lock(global_lock);
    VkCommandPool pool = command_pool_map[object];
    lock.unlock();
    finishReadObject(my_data, pool);
}
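
// Hypothetical usage sketch (comment only, not compiled): recording into a
// command buffer implicitly uses the pool it was allocated from, so two threads
// recording into different command buffers from the same VkCommandPool are
// still flagged.  get_dispatch_key/get_my_data_ptr stand in for the usual layer
// lookup helpers:
//
//     VkResult InterceptEndCommandBuffer(VkCommandBuffer commandBuffer) {
//         layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
//         startWriteObject(my_data, commandBuffer);   // records use of the buffer and its pool
//         VkResult result = my_data->device_dispatch_table->EndCommandBuffer(commandBuffer);
//         finishWriteObject(my_data, commandBuffer);  // clears both and wakes waiters
//         return result;
//     }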
#endif // THREADING_H