1 /* Copyright (c) 2015-2016 The Khronos Group Inc.
2  * Copyright (c) 2015-2016 Valve Corporation
3  * Copyright (c) 2015-2016 LunarG, Inc.
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  *     http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  * Author: Cody Northrop <cody@lunarg.com>
18  * Author: Mike Stroyan <mike@LunarG.com>
19  */
20 
21 #ifndef THREADING_H
22 #define THREADING_H
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <unordered_map>
#include <vector>
#include "vk_layer_config.h"
#include "vk_layer_logging.h"
28 
29 #if defined(__LP64__) || defined(_WIN64) || defined(__x86_64__) || defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || \
30     defined(__aarch64__) || defined(__powerpc64__)
31 // If pointers are 64-bit, then there can be separate counters for each
32 // NONDISPATCHABLE_HANDLE type.  Otherwise they are all typedef uint64_t.
33 #define DISTINCT_NONDISPATCHABLE_HANDLES
34 #endif
35 
// Threading checker error codes, passed as the msgCode of log_msg() so debug
// report callbacks can identify which thread-safety rule was violated.
enum THREADING_CHECKER_ERROR {
    THREADING_CHECKER_NONE,                 // Used for INFO & other non-error messages
    THREADING_CHECKER_MULTIPLE_THREADS,     // Object used simultaneously by multiple threads
    THREADING_CHECKER_SINGLE_THREAD_REUSE,  // Object used simultaneously by recursion in single thread
};
42 
// Per-object bookkeeping for counter<T>: who is using the object right now
// and how.  An instance exists in counter::uses only while the object is in
// flight; it is erased when both counts drop to zero.
struct object_use_data {
    loader_platform_thread_id thread;  // thread recorded as the current user
    int reader_count;                  // number of concurrent read uses
    int writer_count;                  // number of concurrent write (exclusive) uses
};
48 
49 struct layer_data;
50 
namespace threading {
// Sticky flags tracking whether the application has ever called into Vulkan
// from two threads at once.  std::atomic (rather than volatile) is required
// for well-defined cross-thread visibility; volatile provides no ordering or
// atomicity guarantees.
// NOTE(review): these are non-inline namespace-scope definitions in a header;
// presumably this header is included from a single .cpp — verify, else ODR.
std::atomic<bool> vulkan_in_use(false);
std::atomic<bool> vulkan_multi_threaded(false);

// starting check if an application is using vulkan from multiple threads.
// Returns true once any two calls have ever overlapped (the result is sticky).
inline bool startMultiThread() {
    if (vulkan_multi_threaded) {
        return true;
    }
    // exchange() atomically observes-and-sets the in-use flag, closing the
    // check-then-set race the volatile version had: if another thread was
    // already inside Vulkan, we see its 'true' and latch multi-threaded mode.
    if (vulkan_in_use.exchange(true)) {
        vulkan_multi_threaded = true;
        return true;
    }
    return false;
}

// finishing check if an application is using vulkan from multiple threads.
inline void finishMultiThread() { vulkan_in_use = false; }
}  // namespace threading
70 
71 template <typename T>
72 class counter {
73    public:
74     const char *typeName;
75     VkDebugReportObjectTypeEXT objectType;
76     std::unordered_map<T, object_use_data> uses;
77     std::mutex counter_lock;
78     std::condition_variable counter_condition;
79     void startWrite(debug_report_data *report_data, T object) {
80         if (object == VK_NULL_HANDLE) {
81             return;
82         }
83         bool skipCall = false;
84         loader_platform_thread_id tid = loader_platform_get_thread_id();
85         std::unique_lock<std::mutex> lock(counter_lock);
86         if (uses.find(object) == uses.end()) {
87             // There is no current use of the object.  Record writer thread.
88             struct object_use_data *use_data = &uses[object];
89             use_data->reader_count = 0;
90             use_data->writer_count = 1;
91             use_data->thread = tid;
92         } else {
93             struct object_use_data *use_data = &uses[object];
94             if (use_data->reader_count == 0) {
95                 // There are no readers.  Two writers just collided.
96                 if (use_data->thread != tid) {
97                     skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object), 0,
98                                         THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
99                                         "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld",
100                                         typeName, use_data->thread, tid);
101                     if (skipCall) {
102                         // Wait for thread-safe access to object instead of skipping call.
103                         while (uses.find(object) != uses.end()) {
104                             counter_condition.wait(lock);
105                         }
106                         // There is now no current use of the object.  Record writer thread.
107                         struct object_use_data *new_use_data = &uses[object];
108                         new_use_data->thread = tid;
109                         new_use_data->reader_count = 0;
110                         new_use_data->writer_count = 1;
111                     } else {
112                         // Continue with an unsafe use of the object.
113                         use_data->thread = tid;
114                         use_data->writer_count += 1;
115                     }
116                 } else {
117                     // This is either safe multiple use in one call, or recursive use.
118                     // There is no way to make recursion safe.  Just forge ahead.
119                     use_data->writer_count += 1;
120                 }
121             } else {
122                 // There are readers.  This writer collided with them.
123                 if (use_data->thread != tid) {
124                     skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object), 0,
125                                         THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
126                                         "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld",
127                                         typeName, use_data->thread, tid);
128                     if (skipCall) {
129                         // Wait for thread-safe access to object instead of skipping call.
130                         while (uses.find(object) != uses.end()) {
131                             counter_condition.wait(lock);
132                         }
133                         // There is now no current use of the object.  Record writer thread.
134                         struct object_use_data *new_use_data = &uses[object];
135                         new_use_data->thread = tid;
136                         new_use_data->reader_count = 0;
137                         new_use_data->writer_count = 1;
138                     } else {
139                         // Continue with an unsafe use of the object.
140                         use_data->thread = tid;
141                         use_data->writer_count += 1;
142                     }
143                 } else {
144                     // This is either safe multiple use in one call, or recursive use.
145                     // There is no way to make recursion safe.  Just forge ahead.
146                     use_data->writer_count += 1;
147                 }
148             }
149         }
150     }
151 
152     void finishWrite(T object) {
153         if (object == VK_NULL_HANDLE) {
154             return;
155         }
156         // Object is no longer in use
157         std::unique_lock<std::mutex> lock(counter_lock);
158         uses[object].writer_count -= 1;
159         if ((uses[object].reader_count == 0) && (uses[object].writer_count == 0)) {
160             uses.erase(object);
161         }
162         // Notify any waiting threads that this object may be safe to use
163         lock.unlock();
164         counter_condition.notify_all();
165     }
166 
167     void startRead(debug_report_data *report_data, T object) {
168         if (object == VK_NULL_HANDLE) {
169             return;
170         }
171         bool skipCall = false;
172         loader_platform_thread_id tid = loader_platform_get_thread_id();
173         std::unique_lock<std::mutex> lock(counter_lock);
174         if (uses.find(object) == uses.end()) {
175             // There is no current use of the object.  Record reader count
176             struct object_use_data *use_data = &uses[object];
177             use_data->reader_count = 1;
178             use_data->writer_count = 0;
179             use_data->thread = tid;
180         } else if (uses[object].writer_count > 0 && uses[object].thread != tid) {
181             // There is a writer of the object.
182             skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object), 0,
183                                 THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
184                                 "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld", typeName,
185                                 uses[object].thread, tid);
186             if (skipCall) {
187                 // Wait for thread-safe access to object instead of skipping call.
188                 while (uses.find(object) != uses.end()) {
189                     counter_condition.wait(lock);
190                 }
191                 // There is no current use of the object.  Record reader count
192                 struct object_use_data *use_data = &uses[object];
193                 use_data->reader_count = 1;
194                 use_data->writer_count = 0;
195                 use_data->thread = tid;
196             } else {
197                 uses[object].reader_count += 1;
198             }
199         } else {
200             // There are other readers of the object.  Increase reader count
201             uses[object].reader_count += 1;
202         }
203     }
204     void finishRead(T object) {
205         if (object == VK_NULL_HANDLE) {
206             return;
207         }
208         std::unique_lock<std::mutex> lock(counter_lock);
209         uses[object].reader_count -= 1;
210         if ((uses[object].reader_count == 0) && (uses[object].writer_count == 0)) {
211             uses.erase(object);
212         }
213         // Notify any waiting threads that this object may be safe to use
214         lock.unlock();
215         counter_condition.notify_all();
216     }
217     counter(const char *name = "", VkDebugReportObjectTypeEXT type = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT) {
218         typeName = name;
219         objectType = type;
220     }
221 };
222 
223 struct layer_data {
224     VkInstance instance;
225 
226     debug_report_data *report_data;
227     std::vector<VkDebugReportCallbackEXT> logging_callback;
228     VkLayerDispatchTable *device_dispatch_table;
229     VkLayerInstanceDispatchTable *instance_dispatch_table;
230     // The following are for keeping track of the temporary callbacks that can
231     // be used in vkCreateInstance and vkDestroyInstance:
232     uint32_t num_tmp_callbacks;
233     VkDebugReportCallbackCreateInfoEXT *tmp_dbg_create_infos;
234     VkDebugReportCallbackEXT *tmp_callbacks;
235     counter<VkCommandBuffer> c_VkCommandBuffer;
236     counter<VkDevice> c_VkDevice;
237     counter<VkInstance> c_VkInstance;
238     counter<VkQueue> c_VkQueue;
239 #ifdef DISTINCT_NONDISPATCHABLE_HANDLES
240     counter<VkBuffer> c_VkBuffer;
241     counter<VkBufferView> c_VkBufferView;
242     counter<VkCommandPool> c_VkCommandPool;
243     counter<VkDescriptorPool> c_VkDescriptorPool;
244     counter<VkDescriptorSet> c_VkDescriptorSet;
245     counter<VkDescriptorSetLayout> c_VkDescriptorSetLayout;
246     counter<VkDeviceMemory> c_VkDeviceMemory;
247     counter<VkEvent> c_VkEvent;
248     counter<VkFence> c_VkFence;
249     counter<VkFramebuffer> c_VkFramebuffer;
250     counter<VkImage> c_VkImage;
251     counter<VkImageView> c_VkImageView;
252     counter<VkPipeline> c_VkPipeline;
253     counter<VkPipelineCache> c_VkPipelineCache;
254     counter<VkPipelineLayout> c_VkPipelineLayout;
255     counter<VkQueryPool> c_VkQueryPool;
256     counter<VkRenderPass> c_VkRenderPass;
257     counter<VkSampler> c_VkSampler;
258     counter<VkSemaphore> c_VkSemaphore;
259     counter<VkShaderModule> c_VkShaderModule;
260     counter<VkDebugReportCallbackEXT> c_VkDebugReportCallbackEXT;
261     counter<VkObjectTableNVX> c_VkObjectTableNVX;
262     counter<VkIndirectCommandsLayoutNVX> c_VkIndirectCommandsLayoutNVX;
263     counter<VkDisplayKHR> c_VkDisplayKHR;
264     counter<VkDisplayModeKHR> c_VkDisplayModeKHR;
265     counter<VkSurfaceKHR> c_VkSurfaceKHR;
266     counter<VkSwapchainKHR> c_VkSwapchainKHR;
267     counter<VkDescriptorUpdateTemplateKHR> c_VkDescriptorUpdateTemplateKHR;
268     counter<VkValidationCacheEXT> c_VkValidationCacheEXT;
269     counter<VkSamplerYcbcrConversionKHR> c_VkSamplerYcbcrConversionKHR;
270 #else   // DISTINCT_NONDISPATCHABLE_HANDLES
271     counter<uint64_t> c_uint64_t;
272 #endif  // DISTINCT_NONDISPATCHABLE_HANDLES
273 
274     layer_data()
275         : report_data(nullptr),
276           num_tmp_callbacks(0),
277           tmp_dbg_create_infos(nullptr),
278           tmp_callbacks(nullptr),
279           c_VkCommandBuffer("VkCommandBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT),
280           c_VkDevice("VkDevice", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT),
281           c_VkInstance("VkInstance", VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT),
282           c_VkQueue("VkQueue", VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT),
283 #ifdef DISTINCT_NONDISPATCHABLE_HANDLES
284           c_VkBuffer("VkBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT),
285           c_VkBufferView("VkBufferView", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT),
286           c_VkCommandPool("VkCommandPool", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT),
287           c_VkDescriptorPool("VkDescriptorPool", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT),
288           c_VkDescriptorSet("VkDescriptorSet", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT),
289           c_VkDescriptorSetLayout("VkDescriptorSetLayout", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT),
290           c_VkDeviceMemory("VkDeviceMemory", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT),
291           c_VkEvent("VkEvent", VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT),
292           c_VkFence("VkFence", VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT),
293           c_VkFramebuffer("VkFramebuffer", VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT),
294           c_VkImage("VkImage", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT),
295           c_VkImageView("VkImageView", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT),
296           c_VkPipeline("VkPipeline", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT),
297           c_VkPipelineCache("VkPipelineCache", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT),
298           c_VkPipelineLayout("VkPipelineLayout", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT),
299           c_VkQueryPool("VkQueryPool", VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT),
300           c_VkRenderPass("VkRenderPass", VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT),
301           c_VkSampler("VkSampler", VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT),
302           c_VkSemaphore("VkSemaphore", VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT),
303           c_VkShaderModule("VkShaderModule", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT),
304           c_VkDebugReportCallbackEXT("VkDebugReportCallbackEXT", VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT),
305           c_VkObjectTableNVX("VkObjectTableNVX", VK_DEBUG_REPORT_OBJECT_TYPE_OBJECT_TABLE_NVX_EXT),
306           c_VkIndirectCommandsLayoutNVX("VkIndirectCommandsLayoutNVX",
307                                         VK_DEBUG_REPORT_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX_EXT),
308           c_VkDisplayKHR("VkDisplayKHR", VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT),
309           c_VkDisplayModeKHR("VkDisplayModeKHR", VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT),
310           c_VkSurfaceKHR("VkSurfaceKHR", VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT),
311           c_VkSwapchainKHR("VkSwapchainKHR", VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT),
312           c_VkDescriptorUpdateTemplateKHR("VkDescriptorUpdateTemplateKHR",
313                                           VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR_EXT),
314           c_VkSamplerYcbcrConversionKHR("VkSamplerYcbcrConversionKHR", VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR_EXT)
315 #else   // DISTINCT_NONDISPATCHABLE_HANDLES
316           c_uint64_t("NON_DISPATCHABLE_HANDLE", VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT)
317 #endif  // DISTINCT_NONDISPATCHABLE_HANDLES
318               {};
319 };
320 
// Generates the four thread-safety entry points for one handle type —
// startWriteObject / finishWriteObject / startReadObject / finishReadObject —
// each forwarding to the matching c_<type> counter in layer_data.
// VkCommandBuffer is intentionally not generated this way; it has hand-written
// overloads further down that also cover the implicit use of its command pool.
#define WRAPPER(type)                                                                                                 \
    static void startWriteObject(struct layer_data *my_data, type object) {                                           \
        my_data->c_##type.startWrite(my_data->report_data, object);                                                   \
    }                                                                                                                 \
    static void finishWriteObject(struct layer_data *my_data, type object) { my_data->c_##type.finishWrite(object); } \
    static void startReadObject(struct layer_data *my_data, type object) {                                            \
        my_data->c_##type.startRead(my_data->report_data, object);                                                    \
    }                                                                                                                 \
    static void finishReadObject(struct layer_data *my_data, type object) { my_data->c_##type.finishRead(object); }
330 
// Instantiate the thread-safety wrappers for every tracked handle type.
// When pointers are not 64-bit (see DISTINCT_NONDISPATCHABLE_HANDLES above),
// all non-dispatchable handles are the same uint64_t typedef, so a single
// uint64_t wrapper set is generated instead of one per type.
WRAPPER(VkDevice)
WRAPPER(VkInstance)
WRAPPER(VkQueue)
#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
WRAPPER(VkBuffer)
WRAPPER(VkBufferView)
WRAPPER(VkCommandPool)
WRAPPER(VkDescriptorPool)
WRAPPER(VkDescriptorSet)
WRAPPER(VkDescriptorSetLayout)
WRAPPER(VkDeviceMemory)
WRAPPER(VkEvent)
WRAPPER(VkFence)
WRAPPER(VkFramebuffer)
WRAPPER(VkImage)
WRAPPER(VkImageView)
WRAPPER(VkPipeline)
WRAPPER(VkPipelineCache)
WRAPPER(VkPipelineLayout)
WRAPPER(VkQueryPool)
WRAPPER(VkRenderPass)
WRAPPER(VkSampler)
WRAPPER(VkSemaphore)
WRAPPER(VkShaderModule)
WRAPPER(VkDebugReportCallbackEXT)
WRAPPER(VkObjectTableNVX)
WRAPPER(VkIndirectCommandsLayoutNVX)
WRAPPER(VkDisplayKHR)
WRAPPER(VkDisplayModeKHR)
WRAPPER(VkSurfaceKHR)
WRAPPER(VkSwapchainKHR)
WRAPPER(VkDescriptorUpdateTemplateKHR)
WRAPPER(VkValidationCacheEXT)
WRAPPER(VkSamplerYcbcrConversionKHR)
#else   // DISTINCT_NONDISPATCHABLE_HANDLES
WRAPPER(uint64_t)
#endif  // DISTINCT_NONDISPATCHABLE_HANDLES
368 
// Maps a dispatch key (instance/device) to its layer_data.
// NOTE(review): 'static' in a header gives every including TU its own copy;
// presumably this header is included by exactly one .cpp — verify.
static std::unordered_map<void *, layer_data *> layer_data_map;
static std::mutex command_pool_lock;  // guards command_pool_map
// Which pool each command buffer was allocated from, so command-buffer use
// can also lock the implicit use of its parent pool (see overloads below).
static std::unordered_map<VkCommandBuffer, VkCommandPool> command_pool_map;
372 
373 // VkCommandBuffer needs check for implicit use of command pool
374 static void startWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = true) {
375     if (lockPool) {
376         std::unique_lock<std::mutex> lock(command_pool_lock);
377         VkCommandPool pool = command_pool_map[object];
378         lock.unlock();
379         startWriteObject(my_data, pool);
380     }
381     my_data->c_VkCommandBuffer.startWrite(my_data->report_data, object);
382 }
383 static void finishWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = true) {
384     my_data->c_VkCommandBuffer.finishWrite(object);
385     if (lockPool) {
386         std::unique_lock<std::mutex> lock(command_pool_lock);
387         VkCommandPool pool = command_pool_map[object];
388         lock.unlock();
389         finishWriteObject(my_data, pool);
390     }
391 }
392 static void startReadObject(struct layer_data *my_data, VkCommandBuffer object) {
393     std::unique_lock<std::mutex> lock(command_pool_lock);
394     VkCommandPool pool = command_pool_map[object];
395     lock.unlock();
396     startReadObject(my_data, pool);
397     my_data->c_VkCommandBuffer.startRead(my_data->report_data, object);
398 }
399 static void finishReadObject(struct layer_data *my_data, VkCommandBuffer object) {
400     my_data->c_VkCommandBuffer.finishRead(object);
401     std::unique_lock<std::mutex> lock(command_pool_lock);
402     VkCommandPool pool = command_pool_map[object];
403     lock.unlock();
404     finishReadObject(my_data, pool);
405 }
406 #endif  // THREADING_H
407