/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and/or associated documentation files (the "Materials"), to
 * deal in the Materials without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Materials, and to permit persons to whom the Materials
 * are furnished to do so, subject to the following conditions:
 *
 * The above copyright notice(s) and this permission notice shall be included
 * in all copies or substantial portions of the Materials.
 *
 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 *
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE
 * USE OR OTHER DEALINGS IN THE MATERIALS
 *
 * Author: Cody Northrop <cody@lunarg.com>
 * Author: Mike Stroyan <mike@LunarG.com>
 */

#ifndef THREADING_H
#define THREADING_H
#include <unordered_map>
#include <vector>
#include "vk_layer_config.h"
#include "vk_layer_logging.h"

#if defined(__LP64__) || defined(_WIN64) || defined(__x86_64__) || defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || \
    defined(__aarch64__) || defined(__powerpc64__)
// If pointers are 64-bit, then there can be separate counters for each
// NONDISPATCHABLE_HANDLE type. Otherwise they are all typedefs of uint64_t,
// so a single shared counter must be used for all of them.
#define DISTINCT_NONDISPATCHABLE_HANDLES
#endif

// Threading checker error codes
typedef enum _THREADING_CHECKER_ERROR {
    THREADING_CHECKER_NONE,                 // Used for INFO & other non-error messages
    THREADING_CHECKER_MULTIPLE_THREADS,     // Object used simultaneously by multiple threads
    THREADING_CHECKER_SINGLE_THREAD_REUSE,  // Object used simultaneously by recursion in single thread
} THREADING_CHECKER_ERROR;

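// Bookkeeping for one object: while an entry exists in a counter's 'uses' map,
// 'thread' records the most recent thread to use the object, and the counts
// track how many calls currently have it open for reading or writing.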
struct object_use_data {
    loader_platform_thread_id thread;
    int reader_count;
    int writer_count;
};

struct layer_data;

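// One global mutex serializes access to every counter's 'uses' map (and to
// command_pool_map below); the condition variable is broadcast whenever an
// object falls out of use, waking any threads waiting for safe access to it.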
static int threadingLockInitialized = 0;
static loader_platform_thread_mutex threadingLock;
static loader_platform_thread_cond threadingCond;

template <typename T> class counter {
  public:
    const char *typeName;
    VkDebugReportObjectTypeEXT objectType;
    std::unordered_map<T, object_use_data> uses;

    void startWrite(debug_report_data *report_data, T object) {
        VkBool32 skipCall = VK_FALSE;
        loader_platform_thread_id tid = loader_platform_get_thread_id();
        loader_platform_thread_lock_mutex(&threadingLock);
        if (uses.find(object) == uses.end()) {
            // There is no current use of the object. Record writer thread.
            struct object_use_data *use_data = &uses[object];
            use_data->reader_count = 0;
            use_data->writer_count = 1;
            use_data->thread = tid;
        } else {
            struct object_use_data *use_data = &uses[object];
            if (use_data->reader_count == 0) {
                // There are no readers. Two writers just collided.
                if (use_data->thread != tid) {
                    skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object),
                                        /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
                                        "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld",
                                        typeName, use_data->thread, tid);
                    if (skipCall) {
                        // Wait for thread-safe access to object instead of skipping call.
                        while (uses.find(object) != uses.end()) {
                            loader_platform_thread_cond_wait(&threadingCond, &threadingLock);
                        }
                        // There is now no current use of the object. Record writer thread.
                        struct object_use_data *use_data = &uses[object];
                        use_data->thread = tid;
                        use_data->reader_count = 0;
                        use_data->writer_count = 1;
                    } else {
                        // Continue with an unsafe use of the object.
                        use_data->thread = tid;
                        use_data->writer_count += 1;
                    }
                } else {
                    // This is either safe multiple use in one call, or recursive use.
                    // There is no way to make recursion safe. Just forge ahead.
                    use_data->writer_count += 1;
                }
            } else {
                // There are readers. This writer collided with them.
                if (use_data->thread != tid) {
                    skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object),
                                        /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
                                        "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld",
                                        typeName, use_data->thread, tid);
                    if (skipCall) {
                        // Wait for thread-safe access to object instead of skipping call.
                        while (uses.find(object) != uses.end()) {
                            loader_platform_thread_cond_wait(&threadingCond, &threadingLock);
                        }
                        // There is now no current use of the object. Record writer thread.
                        struct object_use_data *use_data = &uses[object];
                        use_data->thread = tid;
                        use_data->reader_count = 0;
                        use_data->writer_count = 1;
                    } else {
                        // Continue with an unsafe use of the object.
                        use_data->thread = tid;
                        use_data->writer_count += 1;
                    }
                } else {
                    // This is either safe multiple use in one call, or recursive use.
                    // There is no way to make recursion safe. Just forge ahead.
                    use_data->writer_count += 1;
                }
            }
        }
        loader_platform_thread_unlock_mutex(&threadingLock);
    }

    void finishWrite(T object) {
        // Object is no longer in use
        loader_platform_thread_lock_mutex(&threadingLock);
        uses[object].writer_count -= 1;
        if ((uses[object].reader_count == 0) && (uses[object].writer_count == 0)) {
            uses.erase(object);
        }
        // Notify any waiting threads that this object may be safe to use
        loader_platform_thread_cond_broadcast(&threadingCond);
        loader_platform_thread_unlock_mutex(&threadingLock);
    }

    void startRead(debug_report_data *report_data, T object) {
        VkBool32 skipCall = VK_FALSE;
        loader_platform_thread_id tid = loader_platform_get_thread_id();
        loader_platform_thread_lock_mutex(&threadingLock);
        if (uses.find(object) == uses.end()) {
            // There is no current use of the object. Record reader count
            struct object_use_data *use_data = &uses[object];
            use_data->reader_count = 1;
            use_data->writer_count = 0;
            use_data->thread = tid;
        } else if (uses[object].writer_count > 0 && uses[object].thread != tid) {
            // There is a writer of the object.
            skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object),
                                /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
                                "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld", typeName,
                                uses[object].thread, tid);
            if (skipCall) {
                // Wait for thread-safe access to object instead of skipping call.
                while (uses.find(object) != uses.end()) {
                    loader_platform_thread_cond_wait(&threadingCond, &threadingLock);
                }
                // There is no current use of the object. Record reader count
                struct object_use_data *use_data = &uses[object];
                use_data->reader_count = 1;
                use_data->writer_count = 0;
                use_data->thread = tid;
            } else {
                uses[object].reader_count += 1;
            }
        } else {
            // There are other readers of the object. Increase reader count
            uses[object].reader_count += 1;
        }
        loader_platform_thread_unlock_mutex(&threadingLock);
    }
    void finishRead(T object) {
        loader_platform_thread_lock_mutex(&threadingLock);
        uses[object].reader_count -= 1;
        if ((uses[object].reader_count == 0) && (uses[object].writer_count == 0)) {
            uses.erase(object);
        }
        // Notify any waiting threads that this object may be safe to use
        loader_platform_thread_cond_broadcast(&threadingCond);
        loader_platform_thread_unlock_mutex(&threadingLock);
    }
    counter(const char *name = "", VkDebugReportObjectTypeEXT type = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT) {
        typeName = name;
        objectType = type;
    }
};

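// Illustrative use of a counter (a sketch, not code from this header): a layer
// intercept that writes a dispatchable object can bracket the downcall, e.g.
//
//     my_data->c_VkQueue.startWrite(my_data->report_data, queue);
//     VkResult result = my_data->device_dispatch_table->QueueWaitIdle(queue);
//     my_data->c_VkQueue.finishWrite(queue);
//
// On a cross-thread collision, startWrite() logs THREADING_CHECKER_MULTIPLE_THREADS;
// if the debug callback requests that the call be skipped, it instead waits on
// threadingCond until the object is free and then proceeds safely.
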
struct layer_data {
    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;
    counter<VkCommandBuffer> c_VkCommandBuffer;
    counter<VkDevice> c_VkDevice;
    counter<VkInstance> c_VkInstance;
    counter<VkQueue> c_VkQueue;
#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
    counter<VkBuffer> c_VkBuffer;
    counter<VkBufferView> c_VkBufferView;
    counter<VkCommandPool> c_VkCommandPool;
    counter<VkDescriptorPool> c_VkDescriptorPool;
    counter<VkDescriptorSet> c_VkDescriptorSet;
    counter<VkDescriptorSetLayout> c_VkDescriptorSetLayout;
    counter<VkDeviceMemory> c_VkDeviceMemory;
    counter<VkEvent> c_VkEvent;
    counter<VkFence> c_VkFence;
    counter<VkFramebuffer> c_VkFramebuffer;
    counter<VkImage> c_VkImage;
    counter<VkImageView> c_VkImageView;
    counter<VkPipeline> c_VkPipeline;
    counter<VkPipelineCache> c_VkPipelineCache;
    counter<VkPipelineLayout> c_VkPipelineLayout;
    counter<VkQueryPool> c_VkQueryPool;
    counter<VkRenderPass> c_VkRenderPass;
    counter<VkSampler> c_VkSampler;
    counter<VkSemaphore> c_VkSemaphore;
    counter<VkShaderModule> c_VkShaderModule;
    counter<VkDebugReportCallbackEXT> c_VkDebugReportCallbackEXT;
#else  // DISTINCT_NONDISPATCHABLE_HANDLES
    counter<uint64_t> c_uint64_t;
#endif // DISTINCT_NONDISPATCHABLE_HANDLES
    layer_data()
        : report_data(nullptr), c_VkCommandBuffer("VkCommandBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT),
          c_VkDevice("VkDevice", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT),
          c_VkInstance("VkInstance", VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT),
          c_VkQueue("VkQueue", VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT),
#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
          c_VkBuffer("VkBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT),
          c_VkBufferView("VkBufferView", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT),
          c_VkCommandPool("VkCommandPool", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT),
          c_VkDescriptorPool("VkDescriptorPool", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT),
          c_VkDescriptorSet("VkDescriptorSet", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT),
          c_VkDescriptorSetLayout("VkDescriptorSetLayout", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT),
          c_VkDeviceMemory("VkDeviceMemory", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT),
          c_VkEvent("VkEvent", VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT), c_VkFence("VkFence", VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT),
          c_VkFramebuffer("VkFramebuffer", VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT),
          c_VkImage("VkImage", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT),
          c_VkImageView("VkImageView", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT),
          c_VkPipeline("VkPipeline", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT),
          c_VkPipelineCache("VkPipelineCache", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT),
          c_VkPipelineLayout("VkPipelineLayout", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT),
          c_VkQueryPool("VkQueryPool", VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT),
          c_VkRenderPass("VkRenderPass", VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT),
          c_VkSampler("VkSampler", VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT),
          c_VkSemaphore("VkSemaphore", VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT),
          c_VkShaderModule("VkShaderModule", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT),
          c_VkDebugReportCallbackEXT("VkDebugReportCallbackEXT", VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT)
#else  // DISTINCT_NONDISPATCHABLE_HANDLES
          c_uint64_t("NON_DISPATCHABLE_HANDLE", VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT)
#endif // DISTINCT_NONDISPATCHABLE_HANDLES
    {}
};

#define WRAPPER(type)                                                                                                  \
    static void startWriteObject(struct layer_data *my_data, type object) {                                           \
        my_data->c_##type.startWrite(my_data->report_data, object);                                                   \
    }                                                                                                                  \
    static void finishWriteObject(struct layer_data *my_data, type object) { my_data->c_##type.finishWrite(object); } \
    static void startReadObject(struct layer_data *my_data, type object) {                                            \
        my_data->c_##type.startRead(my_data->report_data, object);                                                    \
    }                                                                                                                  \
    static void finishReadObject(struct layer_data *my_data, type object) { my_data->c_##type.finishRead(object); }

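// For example, WRAPPER(VkQueue) defines startWriteObject/finishWriteObject and
// startReadObject/finishReadObject overloads taking a VkQueue, each forwarding
// to the c_VkQueue counter in layer_data.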
WRAPPER(VkDevice)
WRAPPER(VkInstance)
WRAPPER(VkQueue)
#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
WRAPPER(VkBuffer)
WRAPPER(VkBufferView)
WRAPPER(VkCommandPool)
WRAPPER(VkDescriptorPool)
WRAPPER(VkDescriptorSet)
WRAPPER(VkDescriptorSetLayout)
WRAPPER(VkDeviceMemory)
WRAPPER(VkEvent)
WRAPPER(VkFence)
WRAPPER(VkFramebuffer)
WRAPPER(VkImage)
WRAPPER(VkImageView)
WRAPPER(VkPipeline)
WRAPPER(VkPipelineCache)
WRAPPER(VkPipelineLayout)
WRAPPER(VkQueryPool)
WRAPPER(VkRenderPass)
WRAPPER(VkSampler)
WRAPPER(VkSemaphore)
WRAPPER(VkShaderModule)
WRAPPER(VkDebugReportCallbackEXT)
#else  // DISTINCT_NONDISPATCHABLE_HANDLES
WRAPPER(uint64_t)
#endif // DISTINCT_NONDISPATCHABLE_HANDLES

static std::unordered_map<void *, layer_data *> layer_data_map;
static std::unordered_map<VkCommandBuffer, VkCommandPool> command_pool_map;

// Using a VkCommandBuffer implicitly uses its VkCommandPool, so these
// overloads also bracket the pool unless the caller opts out via lockPool.
static void startWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = true) {
    if (lockPool) {
        loader_platform_thread_lock_mutex(&threadingLock);
        VkCommandPool pool = command_pool_map[object];
        loader_platform_thread_unlock_mutex(&threadingLock);
        startWriteObject(my_data, pool);
    }
    my_data->c_VkCommandBuffer.startWrite(my_data->report_data, object);
}
static void finishWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = true) {
    my_data->c_VkCommandBuffer.finishWrite(object);
    if (lockPool) {
        loader_platform_thread_lock_mutex(&threadingLock);
        VkCommandPool pool = command_pool_map[object];
        loader_platform_thread_unlock_mutex(&threadingLock);
        finishWriteObject(my_data, pool);
    }
}
static void startReadObject(struct layer_data *my_data, VkCommandBuffer object) {
    loader_platform_thread_lock_mutex(&threadingLock);
    VkCommandPool pool = command_pool_map[object];
    loader_platform_thread_unlock_mutex(&threadingLock);
    startReadObject(my_data, pool);
    my_data->c_VkCommandBuffer.startRead(my_data->report_data, object);
}
static void finishReadObject(struct layer_data *my_data, VkCommandBuffer object) {
    my_data->c_VkCommandBuffer.finishRead(object);
    loader_platform_thread_lock_mutex(&threadingLock);
    VkCommandPool pool = command_pool_map[object];
    loader_platform_thread_unlock_mutex(&threadingLock);
    finishReadObject(my_data, pool);
}
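
// Illustrative sketch (an assumption, not code from this header): a generated
// intercept for vkBeginCommandBuffer could use the helpers above like so.
// get_dispatch_key()/get_my_data_ptr() are the layer utility lookups assumed
// to be available where these helpers are used.
//
//     VkResult VKAPI_CALL BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
//         layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
//         startWriteObject(my_data, commandBuffer);
//         VkResult result = my_data->device_dispatch_table->BeginCommandBuffer(commandBuffer, pBeginInfo);
//         finishWriteObject(my_data, commandBuffer);
//         return result;
//     }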
#endif // THREADING_H