/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and/or associated documentation files (the "Materials"), to
 * deal in the Materials without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Materials, and to permit persons to whom the Materials
 * are furnished to do so, subject to the following conditions:
 *
 * The above copyright notice(s) and this permission notice shall be included
 * in all copies or substantial portions of the Materials.
 *
 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 *
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE
 * USE OR OTHER DEALINGS IN THE MATERIALS.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

// Turn on mem_tracker merged code
#define MTMERGESOURCE 1

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <unordered_map>
#include <unordered_set>
#include <map>
#include <string>
#include <iostream>
#include <algorithm>
#include <list>
#include <SPIRV/spirv.hpp>
#include <set>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_struct_string_helper_cpp.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "vk_struct_size_helper.h"
#include "core_validation.h"
#include "vk_layer_config.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_logging.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...) printf(__VA_ARGS__)
#endif

using std::unordered_map;
using std::unordered_set;

#if MTMERGESOURCE
// WSI Image Objects bypass usual Image Object creation methods. A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
#endif
// Track command pools and their command buffers
struct CMD_POOL_INFO {
    VkCommandPoolCreateFlags createFlags;
    uint32_t queueFamilyIndex;
    list<VkCommandBuffer> commandBuffers; // list container of cmd buffers allocated from this pool
};

struct devExts {
    VkBool32 wsi_enabled;
    unordered_map<VkSwapchainKHR, SWAPCHAIN_NODE *> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;
struct render_pass;

struct layer_data {
    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;
#if MTMERGESOURCE
    // MTMERGESOURCE - stuff pulled directly from MT
    uint64_t currentFenceId;
    // Maps for tracking key structs related to mem_tracker state
    unordered_map<VkDescriptorSet, MT_DESCRIPTOR_SET_INFO> descriptorSetMap;
    // Images and buffers are the two object types that can have memory bound to them, so they get special treatment
    unordered_map<uint64_t, MT_OBJ_BINDING_INFO> imageBindingMap;
    unordered_map<uint64_t, MT_OBJ_BINDING_INFO> bufferBindingMap;
    // MTMERGESOURCE - End of MT stuff
#endif
    devExts device_extensions;
    vector<VkQueue> queues; // all queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> sampleMap;
    unordered_map<VkImageView, VkImageViewCreateInfo> imageViewMap;
    unordered_map<VkImage, IMAGE_NODE> imageMap;
    unordered_map<VkBufferView, VkBufferViewCreateInfo> bufferViewMap;
    unordered_map<VkBuffer, BUFFER_NODE> bufferMap;
    unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
    unordered_map<VkCommandPool, CMD_POOL_INFO> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, SET_NODE *> setMap;
    unordered_map<VkDescriptorSetLayout, LAYOUT_NODE *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, DEVICE_MEM_INFO> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_NODE> queueMap;
    unordered_map<VkEvent, EVENT_NODE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, FRAMEBUFFER_NODE> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, RENDER_PASS_NODE *> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    // Current render pass
    VkRenderPassBeginInfo renderPassBeginInfo;
    uint32_t currentSubpass;

    // Device specific data
    PHYS_DEV_PROPERTIES_NODE physDevProperties;
    // MTMERGESOURCE - added a couple of fields to constructor initializer
    layer_data()
        : report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr),
#if MTMERGESOURCE
          currentFenceId(1),
#endif
          device_extensions(){};
};

static const VkLayerProperties cv_global_layers[] = {{
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
}};

template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], cv_global_layers[0].layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[0], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       cv_global_layers[0].layerName);
        }
    }
}

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over SPIR-V instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIR-V module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() { return *it >> 16; }
    uint32_t opcode() { return *it & 0x0ffffu; }
    uint32_t const &word(unsigned n) { return it[n]; }
    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { /* x++ */
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { /* ++x; */
        it += len();
        return *this;
    }

    /* The iterator and the value are the same thing. */
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};

struct shader_module {
    /* the SPIR-V image itself */
    vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because walking type
     * trees, constant expressions, etc. requires jumping all over the instruction stream.
     */
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {

        build_def_index(this);
    }

    /* expose begin() / end() to enable range-based for */
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
    /* given an offset into the module, produce an iterator there. */
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    /* gets an iterator to the definition of an id */
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};
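
/* Usage sketch (illustrative only; nothing in this file calls it): the iterator and
 * def_index make it cheap to scan a module linearly and then jump to any id's
 * definition. The helper name below is hypothetical.
 *
 *     static unsigned count_float_variables(shader_module const &module) {
 *         unsigned count = 0;
 *         for (auto insn : module) {                            // range-based for via begin()/end()
 *             if (insn.opcode() != spv::OpVariable)
 *                 continue;
 *             auto ptr_type = module.get_def(insn.word(1));     // jump to the variable's pointer type
 *             if (ptr_type == module.end() || ptr_type.opcode() != spv::OpTypePointer)
 *                 continue;
 *             auto pointee = module.get_def(ptr_type.word(3)); // and from there to the pointee type
 *             if (pointee != module.end() && pointee.opcode() == spv::OpTypeFloat)
 *                 count++;
 *         }
 *         return count;
 *     }
 */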

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;

// TODO : This can be much smarter, using separate locks for separate global data
static int globalLockInitialized = 0;
static loader_platform_thread_mutex globalLock;
#define MAX_TID 513
static loader_platform_thread_id g_tidMapping[MAX_TID] = {0};
static uint32_t g_maxTID = 0;
#if MTMERGESOURCE
// MTMERGESOURCE - start of direct pull
static VkPhysicalDeviceMemoryProperties memProps;

static void clear_cmd_buf_and_mem_references(layer_data *my_data, const VkCommandBuffer cb);

#define MAX_BINDING 0xFFFFFFFF

static MT_OBJ_BINDING_INFO *get_object_binding_info(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    MT_OBJ_BINDING_INFO *retValue = NULL;
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto it = my_data->imageBindingMap.find(handle);
        if (it != my_data->imageBindingMap.end())
            return &(*it).second;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto it = my_data->bufferBindingMap.find(handle);
        if (it != my_data->bufferBindingMap.end())
            return &(*it).second;
        break;
    }
    default:
        break;
    }
    return retValue;
}
// MTMERGESOURCE - end section
#endif
template layer_data *get_my_data_ptr<layer_data>(void *data_key, std::unordered_map<void *, layer_data *> &data_map);

// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data *, const VkCommandBuffer);

#if MTMERGESOURCE
static void delete_queue_info_list(layer_data *my_data) {
    // Process queue list, cleaning up each entry before deleting
    my_data->queueMap.clear();
}

// Delete CBInfo from container and clear mem references to CB
static void delete_cmd_buf_info(layer_data *my_data, VkCommandPool commandPool, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(my_data, cb);
    // Delete the CBInfo info
    my_data->commandPoolMap[commandPool].commandBuffers.remove(cb);
    my_data->commandBufferMap.erase(cb);
}

static void add_object_binding_info(layer_data *my_data, const uint64_t handle, const VkDebugReportObjectTypeEXT type,
                                    const VkDeviceMemory mem) {
    switch (type) {
    // Buffers and images are unique as their CreateInfo is in container struct
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto pCI = &my_data->bufferBindingMap[handle];
        pCI->mem = mem;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto pCI = &my_data->imageBindingMap[handle];
        pCI->mem = mem;
        break;
    }
    default:
        break;
    }
}

static void add_object_create_info(layer_data *my_data, const uint64_t handle, const VkDebugReportObjectTypeEXT type,
                                   const void *pCreateInfo) {
    // TODO : For any CreateInfo struct that has ptrs, need to deep copy them and appropriately clean up on Destroy
    switch (type) {
    // Buffers and images are unique as their CreateInfo is in container struct
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto pCI = &my_data->bufferBindingMap[handle];
        memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
        memcpy(&pCI->create_info.buffer, pCreateInfo, sizeof(VkBufferCreateInfo));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto pCI = &my_data->imageBindingMap[handle];
        memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
        memcpy(&pCI->create_info.image, pCreateInfo, sizeof(VkImageCreateInfo));
        break;
    }
    // Swap chain images are unique: use my_data->imageBindingMap, but copy in the
    // VkSwapchainCreateInfoKHR's usage flags and set the mem value to a unique key. These are used by
    // vkCreateImageView and internal mem_tracker routines to distinguish swap chain images
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT: {
        auto pCI = &my_data->imageBindingMap[handle];
        memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
        pCI->mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
        pCI->valid = false;
        pCI->create_info.image.usage =
            const_cast<VkSwapchainCreateInfoKHR *>(static_cast<const VkSwapchainCreateInfoKHR *>(pCreateInfo))->imageUsage;
        break;
    }
    default:
        break;
    }
}

// Record a fence (if one was provided) in our list of fences/fenceIds and assign it a new fenceId
static VkBool32 add_fence_info(layer_data *my_data, VkFence fence, VkQueue queue, uint64_t *fenceId) {
    VkBool32 skipCall = VK_FALSE;
    *fenceId = my_data->currentFenceId++;

    // If an actual fence was provided, track it; otherwise there is nothing to record
    if (fence != VK_NULL_HANDLE) {
        my_data->fenceMap[fence].fenceId = *fenceId;
        my_data->fenceMap[fence].queue = queue;
        // Validate that fence is in UNSIGNALED state
        VkFenceCreateInfo *pFenceCI = &(my_data->fenceMap[fence].createInfo);
        if (pFenceCI->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                               (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                               "Fence %#" PRIxLEAST64 " submitted in SIGNALED state. Fences must be reset before being submitted",
                               (uint64_t)fence);
        }
    } else {
        // TODO : Do we need to create an internal fence here for tracking purposes?
    }
    // Update most recently submitted fence and fenceId for Queue
    my_data->queueMap[queue].lastSubmittedId = *fenceId;
    return skipCall;
}

// Remove a fenceInfo from our list of fences/fenceIds
static void delete_fence_info(layer_data *my_data, VkFence fence) { my_data->fenceMap.erase(fence); }

// Record information when a fence is known to be signalled
static void update_fence_tracking(layer_data *my_data, VkFence fence) {
    auto fence_item = my_data->fenceMap.find(fence);
    if (fence_item != my_data->fenceMap.end()) {
        FENCE_NODE *pCurFenceInfo = &(*fence_item).second;
        VkQueue queue = pCurFenceInfo->queue;
        auto queue_item = my_data->queueMap.find(queue);
        if (queue_item != my_data->queueMap.end()) {
            QUEUE_NODE *pQueueInfo = &(*queue_item).second;
            if (pQueueInfo->lastRetiredId < pCurFenceInfo->fenceId) {
                pQueueInfo->lastRetiredId = pCurFenceInfo->fenceId;
            }
        }
    }

    // Update fence state in fenceCreateInfo structure
    auto pFCI = &(my_data->fenceMap[fence].createInfo);
    pFCI->flags = static_cast<VkFenceCreateFlags>(pFCI->flags | VK_FENCE_CREATE_SIGNALED_BIT);
}

// Helper routine that updates the fence list for a specific queue to all-retired
static void retire_queue_fences(layer_data *my_data, VkQueue queue) {
    QUEUE_NODE *pQueueInfo = &my_data->queueMap[queue];
    // Set queue's lastRetired to lastSubmitted indicating all fences completed
    pQueueInfo->lastRetiredId = pQueueInfo->lastSubmittedId;
}

// Helper routine that updates all queues to all-retired
static void retire_device_fences(layer_data *my_data, VkDevice device) {
    // Process each queue for device
    // TODO: Add multiple device support
    for (auto ii = my_data->queueMap.begin(); ii != my_data->queueMap.end(); ++ii) {
        // Set queue's lastRetired to lastSubmitted indicating all fences completed
        QUEUE_NODE *pQueueInfo = &(*ii).second;
        pQueueInfo->lastRetiredId = pQueueInfo->lastSubmittedId;
    }
}
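
/* How the fence-id scheme above fits together (illustrative walkthrough, not code
 * from this file): every submission draws a fresh, monotonically increasing id
 * from currentFenceId, so per-queue progress reduces to two counters.
 *
 *     submit A with fence F1  -> F1.fenceId = 1, queue.lastSubmittedId = 1
 *     submit B with fence F2  -> F2.fenceId = 2, queue.lastSubmittedId = 2
 *     F2 observed signalled   -> update_fence_tracking: queue.lastRetiredId = 2
 *
 * Any command buffer whose fenceId <= lastRetiredId can then be treated as
 * complete (see checkCBCompleted below).
 */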

// Helper function to validate correct usage bits set for buffers or images
// Verify that (actual & desired) flags != 0 or,
// if strict is true, verify that (actual & desired) flags == desired
// In case of error, report it via dbg callbacks
static VkBool32 validate_usage_flags(layer_data *my_data, void *disp_obj, VkFlags actual, VkFlags desired, VkBool32 strict,
                                     uint64_t obj_handle, VkDebugReportObjectTypeEXT obj_type, char const *ty_str,
                                     char const *func_name, char const *usage_str) {
    VkBool32 correct_usage = VK_FALSE;
    VkBool32 skipCall = VK_FALSE;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                           MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s %#" PRIxLEAST64
                           " used by %s. In this case, %s should have %s set during creation.",
                           ty_str, obj_handle, func_name, ty_str, usage_str);
    }
    return skipCall;
}

// Helper function to validate usage flags for images
// Pulls image info and then sends actual vs. desired usage off to helper above where
// an error will be flagged if usage is not correct
static VkBool32 validate_image_usage_flags(layer_data *my_data, void *disp_obj, VkImage image, VkFlags desired, VkBool32 strict,
                                           char const *func_name, char const *usage_string) {
    VkBool32 skipCall = VK_FALSE;
    MT_OBJ_BINDING_INFO *pBindInfo = get_object_binding_info(my_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
    if (pBindInfo) {
        skipCall = validate_usage_flags(my_data, disp_obj, pBindInfo->create_info.image.usage, desired, strict, (uint64_t)image,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "image", func_name, usage_string);
    }
    return skipCall;
}

// Helper function to validate usage flags for buffers
// Pulls buffer info and then sends actual vs. desired usage off to helper above where
// an error will be flagged if usage is not correct
static VkBool32 validate_buffer_usage_flags(layer_data *my_data, void *disp_obj, VkBuffer buffer, VkFlags desired, VkBool32 strict,
                                            char const *func_name, char const *usage_string) {
    VkBool32 skipCall = VK_FALSE;
    MT_OBJ_BINDING_INFO *pBindInfo = get_object_binding_info(my_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
    if (pBindInfo) {
        skipCall = validate_usage_flags(my_data, disp_obj, pBindInfo->create_info.buffer.usage, desired, strict, (uint64_t)buffer,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "buffer", func_name, usage_string);
    }
    return skipCall;
}
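
/* Example call (illustrative only): a transfer-source check would typically be
 * strict, requiring the exact bit to have been set at creation time:
 *
 *     skipCall |= validate_buffer_usage_flags(dev_data, disp_obj, buffer,
 *                                             VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_TRUE,
 *                                             "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
 *
 * With strict == VK_FALSE the check instead passes if any of the desired bits is set.
 */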

// Return ptr to info in map container containing mem, or NULL if not found
// Calls to this function should be wrapped in mutex
static DEVICE_MEM_INFO *get_mem_obj_info(layer_data *dev_data, const VkDeviceMemory mem) {
    auto item = dev_data->memObjMap.find(mem);
    if (item != dev_data->memObjMap.end()) {
        return &(*item).second;
    } else {
        return NULL;
    }
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    memcpy(&my_data->memObjMap[mem].allocInfo, pAllocateInfo, sizeof(VkMemoryAllocateInfo));
    // TODO: Update for real hardware, actually process allocation info structures
    my_data->memObjMap[mem].allocInfo.pNext = NULL;
    my_data->memObjMap[mem].object = object;
    my_data->memObjMap[mem].refCount = 0;
    my_data->memObjMap[mem].mem = mem;
    my_data->memObjMap[mem].image = VK_NULL_HANDLE;
    my_data->memObjMap[mem].memRange.offset = 0;
    my_data->memObjMap[mem].memRange.size = 0;
    my_data->memObjMap[mem].pData = 0;
    my_data->memObjMap[mem].pDriverData = 0;
    my_data->memObjMap[mem].valid = false;
}

static VkBool32 validate_memory_is_valid(layer_data *dev_data, VkDeviceMemory mem, const char *functionName,
                                         VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        MT_OBJ_BINDING_INFO *pBindInfo =
            get_object_binding_info(dev_data, reinterpret_cast<const uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
        if (pBindInfo && !pBindInfo->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid swapchain image %" PRIx64 ", please fill the memory before using.",
                           functionName, (uint64_t)(image));
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
        if (pMemObj && !pMemObj->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid memory %" PRIx64 ", please fill the memory before using.", functionName,
                           (uint64_t)(mem));
        }
    }
    return false;
}

static void set_memory_valid(layer_data *dev_data, VkDeviceMemory mem, bool valid, VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        MT_OBJ_BINDING_INFO *pBindInfo =
            get_object_binding_info(dev_data, reinterpret_cast<const uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
        if (pBindInfo) {
            pBindInfo->valid = valid;
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
        if (pMemObj) {
            pMemObj->valid = valid;
        }
    }
}

// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static VkBool32 update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                                  const char *apiName) {
    VkBool32 skipCall = VK_FALSE;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
        if (pMemInfo) {
            // Search for cmd buffer object in memory object's binding list
            VkBool32 found = VK_FALSE;
            if (pMemInfo->pCommandBufferBindings.size() > 0) {
                for (list<VkCommandBuffer>::iterator it = pMemInfo->pCommandBufferBindings.begin();
                     it != pMemInfo->pCommandBufferBindings.end(); ++it) {
                    if ((*it) == cb) {
                        found = VK_TRUE;
                        break;
                    }
                }
            }
            // If not present, add to list
            if (found == VK_FALSE) {
                pMemInfo->pCommandBufferBindings.push_front(cb);
                pMemInfo->refCount++;
            }
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (pCBNode) {
                // Search for memory object in cmd buffer's reference list
                VkBool32 found = VK_FALSE;
                if (pCBNode->pMemObjList.size() > 0) {
                    for (auto it = pCBNode->pMemObjList.begin(); it != pCBNode->pMemObjList.end(); ++it) {
                        if ((*it) == mem) {
                            found = VK_TRUE;
                            break;
                        }
                    }
                }
                // If not present, add to list
                if (found == VK_FALSE) {
                    pCBNode->pMemObjList.push_front(mem);
                }
            }
        }
    }
    return skipCall;
}
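
/* The bookkeeping above is deliberately bidirectional (explanatory note, not code):
 * each DEVICE_MEM_INFO keeps the command buffers that reference it in
 * pCommandBufferBindings, and each GLOBAL_CB_NODE keeps the memory objects it
 * references in pMemObjList. That way vkFreeMemory can warn about command buffers
 * still bound to an allocation, and resetting or freeing a command buffer can drop
 * its references from every memory object in one pass (see
 * clear_cmd_buf_and_mem_references below).
 */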

// Free bindings related to CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);

    if (pCBNode) {
        if (pCBNode->pMemObjList.size() > 0) {
            list<VkDeviceMemory> mem_obj_list = pCBNode->pMemObjList;
            for (list<VkDeviceMemory>::iterator it = mem_obj_list.begin(); it != mem_obj_list.end(); ++it) {
                DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, *it);
                if (pInfo) {
                    pInfo->pCommandBufferBindings.remove(cb);
                    pInfo->refCount--;
                }
            }
            pCBNode->pMemObjList.clear();
        }
        pCBNode->activeDescriptorSets.clear();
        pCBNode->validate_functions.clear();
    }
}

// Delete the entire CB list
static void delete_cmd_buf_info_list(layer_data *my_data) {
    for (auto &cb_node : my_data->commandBufferMap) {
        clear_cmd_buf_and_mem_references(my_data, cb_node.first);
    }
    my_data->commandBufferMap.clear();
}

// For given MemObjInfo, report Obj & CB bindings
static VkBool32 reportMemReferencesAndCleanUp(layer_data *dev_data, DEVICE_MEM_INFO *pMemObjInfo) {
    VkBool32 skipCall = VK_FALSE;
    size_t cmdBufRefCount = pMemObjInfo->pCommandBufferBindings.size();
    size_t objRefCount = pMemObjInfo->pObjBindings.size();

    if ((pMemObjInfo->pCommandBufferBindings.size()) != 0) {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                           "Attempting to free memory object %#" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
                           " references",
                           (uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
    }

    if (cmdBufRefCount > 0 && pMemObjInfo->pCommandBufferBindings.size() > 0) {
        for (list<VkCommandBuffer>::const_iterator it = pMemObjInfo->pCommandBufferBindings.begin();
             it != pMemObjInfo->pCommandBufferBindings.end(); ++it) {
            // TODO : CommandBuffer should be source Obj here
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)(*it), __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                    "Command Buffer %p still has a reference to mem obj %#" PRIxLEAST64, (*it), (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->pCommandBufferBindings.clear();
    }

    if (objRefCount > 0 && pMemObjInfo->pObjBindings.size() > 0) {
        for (auto it = pMemObjInfo->pObjBindings.begin(); it != pMemObjInfo->pObjBindings.end(); ++it) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, it->type, it->handle, __LINE__,
                    MEMTRACK_FREED_MEM_REF, "MEM", "VK Object %#" PRIxLEAST64 " still has a reference to mem obj %#" PRIxLEAST64,
                    it->handle, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->pObjBindings.clear();
    }
    return skipCall;
}

static VkBool32 deleteMemObjInfo(layer_data *my_data, void *object, VkDeviceMemory mem) {
    VkBool32 skipCall = VK_FALSE;
    auto item = my_data->memObjMap.find(mem);
    if (item != my_data->memObjMap.end()) {
        my_data->memObjMap.erase(item);
    } else {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                           "Request to delete memory object %#" PRIxLEAST64 " not present in memory Object Map", (uint64_t)mem);
    }
    return skipCall;
}

// Check if fence for given CB is completed
static bool checkCBCompleted(layer_data *my_data, const VkCommandBuffer cb, bool *complete) {
    GLOBAL_CB_NODE *pCBNode = getCBNode(my_data, cb);
    VkBool32 skipCall = false;
    *complete = true;

    if (pCBNode) {
        if (pCBNode->lastSubmittedQueue != NULL) {
            VkQueue queue = pCBNode->lastSubmittedQueue;
            QUEUE_NODE *pQueueInfo = &my_data->queueMap[queue];
            if (pCBNode->fenceId > pQueueInfo->lastRetiredId) {
                skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                                   VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)cb, __LINE__, MEMTRACK_NONE, "MEM",
                                   "fence %#" PRIxLEAST64 " for CB %p has not been checked for completion",
                                   (uint64_t)pCBNode->lastSubmittedFence, cb);
                *complete = false;
            }
        }
    }
    return skipCall;
}

static VkBool32 freeMemObjInfo(layer_data *dev_data, void *object, VkDeviceMemory mem, VkBool32 internal) {
    VkBool32 skipCall = VK_FALSE;
    // Parse global list to find info w/ mem
    DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
    if (pInfo) {
        if (pInfo->allocInfo.allocationSize == 0 && !internal) {
            // TODO: Verify against Valid Use section
            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                               "Attempting to free memory associated with a Persistent Image, %#" PRIxLEAST64 ", "
                               "this should not be explicitly freed\n",
                               (uint64_t)mem);
        } else {
            // Clear any CB bindings for completed CBs
            // TODO : Is there a better place to do this?

            bool commandBufferComplete = false;
            assert(pInfo->object != VK_NULL_HANDLE);
            list<VkCommandBuffer>::iterator it = pInfo->pCommandBufferBindings.begin();
            list<VkCommandBuffer>::iterator temp;
            while (pInfo->pCommandBufferBindings.size() > 0 && it != pInfo->pCommandBufferBindings.end()) {
                skipCall |= checkCBCompleted(dev_data, *it, &commandBufferComplete);
                if (commandBufferComplete) {
                    temp = it;
                    ++temp;
                    clear_cmd_buf_and_mem_references(dev_data, *it);
                    it = temp;
                } else {
                    ++it;
                }
            }

            // Now verify that no references to this mem obj remain and remove bindings
            if (0 != pInfo->refCount) {
                skipCall |= reportMemReferencesAndCleanUp(dev_data, pInfo);
            }
            // Delete mem obj info
            skipCall |= deleteMemObjInfo(dev_data, object, mem);
        }
    }
    return skipCall;
}

static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    default:
        return "unknown";
    }
}

// Remove object binding performs 3 tasks:
// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
// 2. Decrement refCount for MemObjInfo
// 3. Clear mem binding for image/buffer by setting its handle to 0
// TODO : This only applies to Buffer, Image, and Swapchain objects now, how should it be updated/customized?
static VkBool32 clear_object_binding(layer_data *dev_data, void *dispObj, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    // TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
    VkBool32 skipCall = VK_FALSE;
    MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(dev_data, handle, type);
    if (pObjBindInfo) {
        DEVICE_MEM_INFO *pMemObjInfo = get_mem_obj_info(dev_data, pObjBindInfo->mem);
        // TODO : Make sure this is a reasonable way to reset mem binding
        pObjBindInfo->mem = VK_NULL_HANDLE;
        if (pMemObjInfo) {
            // This obj is bound to a memory object. Remove the reference to this object in that memory
            // object's list, decrement the memObj's refcount, and set the object's memory binding pointer to NULL.
            VkBool32 clearSucceeded = VK_FALSE;
            for (auto it = pMemObjInfo->pObjBindings.begin(); it != pMemObjInfo->pObjBindings.end(); ++it) {
                if ((it->handle == handle) && (it->type == type)) {
                    pMemObjInfo->refCount--;
                    pMemObjInfo->pObjBindings.erase(it);
                    clearSucceeded = VK_TRUE;
                    break;
                }
            }
            if (VK_FALSE == clearSucceeded) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "While trying to clear mem binding for %s obj %#" PRIxLEAST64
                            ", unable to find that object referenced by mem obj %#" PRIxLEAST64,
                            object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
            }
        }
    }
    return skipCall;
}

// For NULL mem case, output warning
// Make sure given object is in global object map
// IF a previous binding existed, output validation error
// Otherwise, add reference from objectInfo to memoryInfo
// Add reference off of objInfo
// device is required for error logging, need a dispatchable
// object for that.
static VkBool32 set_mem_binding(layer_data *dev_data, void *dispatch_object, VkDeviceMemory mem, uint64_t handle,
                                VkDebugReportObjectTypeEXT type, const char *apiName) {
    VkBool32 skipCall = VK_FALSE;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        // TODO: Verify against Valid Use section of spec.
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
                           "MEM", "In %s, attempting to Bind Obj(%#" PRIxLEAST64 ") to NULL", apiName, handle);
    } else {
        MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(dev_data, handle, type);
        if (!pObjBindInfo) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS,
                        "MEM", "In %s, attempting to update Binding of %s Obj(%#" PRIxLEAST64 ") that's not in global list()",
                        apiName, object_type_to_string(type), handle);
        } else {
            // non-null case so should have real mem obj
            DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
            if (pMemInfo) {
                // TODO : Need to track mem binding for obj and report conflict here
                DEVICE_MEM_INFO *pPrevBinding = get_mem_obj_info(dev_data, pObjBindInfo->mem);
                if (pPrevBinding != NULL) {
                    skipCall |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                                "In %s, attempting to bind memory (%#" PRIxLEAST64 ") to object (%#" PRIxLEAST64
                                ") which has already been bound to mem object %#" PRIxLEAST64,
                                apiName, (uint64_t)mem, handle, (uint64_t)pPrevBinding->mem);
                } else {
                    MT_OBJ_HANDLE_TYPE oht;
                    oht.handle = handle;
                    oht.type = type;
                    pMemInfo->pObjBindings.push_front(oht);
                    pMemInfo->refCount++;
                    // For image objects, make sure default memory state is correctly set
                    // TODO : What's the best/correct way to handle this?
                    if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                        VkImageCreateInfo ici = pObjBindInfo->create_info.image;
                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                            // TODO:: More memory state transition stuff.
                        }
                    }
                    pObjBindInfo->mem = mem;
                }
            }
        }
    }
    return skipCall;
}

// For NULL mem case, clear any previous binding Else...
// Make sure given object is in its object map
// IF a previous binding existed, update binding
// Add reference from objectInfo to memoryInfo
// Add reference off of object's binding info
// Return VK_TRUE if addition is successful, VK_FALSE otherwise
static VkBool32 set_sparse_mem_binding(layer_data *dev_data, void *dispObject, VkDeviceMemory mem, uint64_t handle,
                                       VkDebugReportObjectTypeEXT type, const char *apiName) {
    VkBool32 skipCall = VK_FALSE;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        skipCall = clear_object_binding(dev_data, dispObject, handle, type);
    } else {
        MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(dev_data, handle, type);
        if (!pObjBindInfo) {
            skipCall |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS, "MEM",
                "In %s, attempting to update Binding of Obj(%#" PRIxLEAST64 ") that's not in global list()", apiName, handle);
        }
        // non-null case so should have real mem obj
        DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
        if (pInfo) {
            // Search for object in memory object's binding list
            VkBool32 found = VK_FALSE;
            if (pInfo->pObjBindings.size() > 0) {
                for (auto it = pInfo->pObjBindings.begin(); it != pInfo->pObjBindings.end(); ++it) {
                    if (((*it).handle == handle) && ((*it).type == type)) {
                        found = VK_TRUE;
                        break;
                    }
                }
            }
            // If not present, add to list
            if (found == VK_FALSE) {
                MT_OBJ_HANDLE_TYPE oht;
                oht.handle = handle;
                oht.type = type;
                pInfo->pObjBindings.push_front(oht);
                pInfo->refCount++;
            }
            // Need to set mem binding for this object (guard against the missing-binding case logged above)
            if (pObjBindInfo) {
                pObjBindInfo->mem = mem;
            }
        }
    }
    return skipCall;
}

template <typename T>
void print_object_map_members(layer_data *my_data, void *dispObj, T const &objectName, VkDebugReportObjectTypeEXT objectType,
                              const char *objectStr) {
    for (auto const &element : objectName) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objectType, 0, __LINE__, MEMTRACK_NONE, "MEM",
                "    %s Object list contains %s Object %#" PRIxLEAST64 " ", objectStr, objectStr, element.first);
    }
}

// For given Object, get 'mem' obj that it's bound to or NULL if no binding
static VkBool32 get_mem_binding_from_object(layer_data *my_data, void *dispObj, const uint64_t handle,
                                            const VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
    VkBool32 skipCall = VK_FALSE;
    *mem = VK_NULL_HANDLE;
    MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(my_data, handle, type);
    if (pObjBindInfo) {
        if (pObjBindInfo->mem) {
            *mem = pObjBindInfo->mem;
        } else {
            skipCall =
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS,
                        "MEM", "Trying to get mem binding for object %#" PRIxLEAST64 " but object has no mem binding", handle);
        }
    } else {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                           "MEM", "Trying to get mem binding for object %#" PRIxLEAST64 " but no such object in %s list", handle,
                           object_type_to_string(type));
    }
    return skipCall;
}

// Print details of MemObjInfo list
static void print_mem_list(layer_data *dev_data, void *dispObj) {
    DEVICE_MEM_INFO *pInfo = NULL;

    // Early out if info is not requested
    if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    // Just printing each msg individually for now, may want to package these into single large print
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            dev_data->memObjMap.size());
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "=============================");

    if (dev_data->memObjMap.size() <= 0)
        return;

    for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
        pInfo = &(*ii).second;

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    ===MemObjInfo at %p===", (void *)pInfo);
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Mem object: %#" PRIxLEAST64, (uint64_t)(pInfo->mem));
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Ref Count: %u", pInfo->refCount);
        if (0 != pInfo->allocInfo.allocationSize) {
            string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&pInfo->allocInfo, "MEM(INFO): ");
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
        } else {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
                pInfo->pObjBindings.size());
        if (pInfo->pObjBindings.size() > 0) {
            for (list<MT_OBJ_HANDLE_TYPE>::iterator it = pInfo->pObjBindings.begin(); it != pInfo->pObjBindings.end(); ++it) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "        VK OBJECT %" PRIu64, it->handle);
            }
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM",
                "    VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
                pInfo->pCommandBufferBindings.size());
        if (pInfo->pCommandBufferBindings.size() > 0) {
            for (list<VkCommandBuffer>::iterator it = pInfo->pCommandBufferBindings.begin();
                 it != pInfo->pCommandBufferBindings.end(); ++it) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "      VK CB %p", (*it));
            }
        }
    }
}

static void printCBList(layer_data *my_data, void *dispObj) {
    GLOBAL_CB_NODE *pCBInfo = NULL;

    // Early out if info is not requested
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            my_data->commandBufferMap.size());
    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "==================");

    if (my_data->commandBufferMap.size() <= 0)
        return;

    for (auto &cb_node : my_data->commandBufferMap) {
        pCBInfo = cb_node.second;

        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    CB Info (%p) has CB %p, fenceId %" PRIx64 ", and fence %#" PRIxLEAST64,
                (void *)pCBInfo, (void *)pCBInfo->commandBuffer, pCBInfo->fenceId, (uint64_t)pCBInfo->lastSubmittedFence);

        if (pCBInfo->pMemObjList.size() <= 0)
            continue;
        for (list<VkDeviceMemory>::iterator it = pCBInfo->pMemObjList.begin(); it != pCBInfo->pMemObjList.end(); ++it) {
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "      Mem obj %" PRIu64, (uint64_t)(*it));
        }
    }
}

#endif

// Map actual TID to an index value and return that index
// This keeps TIDs in the range [0, MAX_TID) and simplifies compares between runs
static uint32_t getTIDIndex() {
    loader_platform_thread_id tid = loader_platform_get_thread_id();
    for (uint32_t i = 0; i < g_maxTID; i++) {
        if (tid == g_tidMapping[i])
            return i;
    }
    // Don't yet have mapping, set it and return newly set index
    uint32_t retVal = (uint32_t)g_maxTID;
    g_tidMapping[g_maxTID++] = tid;
    assert(g_maxTID < MAX_TID);
    return retVal;
}
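
/* For example (illustrative): the first thread to call getTIDIndex() is assigned
 * index 0, the next distinct thread index 1, and so on, so log output stays
 * comparable across runs even though the OS-level thread ids differ each time. */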

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
    case CMD_CLEARCOLORIMAGE:
        return "CMD_CLEARCOLORIMAGE";
    case CMD_CLEARATTACHMENTS:
        return "CMD_CLEARATTACHMENTS";
    case CMD_CLEARDEPTHSTENCILIMAGE:
        return "CMD_CLEARDEPTHSTENCILIMAGE";
    case CMD_RESOLVEIMAGE:
        return "CMD_RESOLVEIMAGE";
    case CMD_SETEVENT:
        return "CMD_SETEVENT";
    case CMD_RESETEVENT:
        return "CMD_RESETEVENT";
    case CMD_WAITEVENTS:
        return "CMD_WAITEVENTS";
    case CMD_PIPELINEBARRIER:
        return "CMD_PIPELINEBARRIER";
    case CMD_BEGINQUERY:
        return "CMD_BEGINQUERY";
    case CMD_ENDQUERY:
        return "CMD_ENDQUERY";
    case CMD_RESETQUERYPOOL:
        return "CMD_RESETQUERYPOOL";
    case CMD_COPYQUERYPOOLRESULTS:
        return "CMD_COPYQUERYPOOLRESULTS";
    case CMD_WRITETIMESTAMP:
        return "CMD_WRITETIMESTAMP";
    case CMD_INITATOMICCOUNTERS:
        return "CMD_INITATOMICCOUNTERS";
    case CMD_LOADATOMICCOUNTERS:
        return "CMD_LOADATOMICCOUNTERS";
    case CMD_SAVEATOMICCOUNTERS:
        return "CMD_SAVEATOMICCOUNTERS";
    case CMD_BEGINRENDERPASS:
        return "CMD_BEGINRENDERPASS";
    case CMD_ENDRENDERPASS:
        return "CMD_ENDRENDERPASS";
    default:
        return "UNKNOWN";
    }
}

// SPIR-V utility functions
static void build_def_index(shader_module *module) {
    for (auto insn : *module) {
        switch (insn.opcode()) {
        /* Types */
        case spv::OpTypeVoid:
        case spv::OpTypeBool:
        case spv::OpTypeInt:
        case spv::OpTypeFloat:
        case spv::OpTypeVector:
        case spv::OpTypeMatrix:
        case spv::OpTypeImage:
        case spv::OpTypeSampler:
        case spv::OpTypeSampledImage:
        case spv::OpTypeArray:
        case spv::OpTypeRuntimeArray:
        case spv::OpTypeStruct:
        case spv::OpTypeOpaque:
        case spv::OpTypePointer:
        case spv::OpTypeFunction:
        case spv::OpTypeEvent:
        case spv::OpTypeDeviceEvent:
        case spv::OpTypeReserveId:
        case spv::OpTypeQueue:
        case spv::OpTypePipe:
            module->def_index[insn.word(1)] = insn.offset();
            break;

        /* Fixed constants */
        case spv::OpConstantTrue:
        case spv::OpConstantFalse:
        case spv::OpConstant:
        case spv::OpConstantComposite:
        case spv::OpConstantSampler:
        case spv::OpConstantNull:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Specialization constants */
        case spv::OpSpecConstantTrue:
        case spv::OpSpecConstantFalse:
        case spv::OpSpecConstant:
        case spv::OpSpecConstantComposite:
        case spv::OpSpecConstantOp:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Variables */
        case spv::OpVariable:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Functions */
        case spv::OpFunction:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        default:
            /* We don't care about any other defs for now. */
            break;
        }
    }
}

static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpEntryPoint) {
            /* OpEntryPoint layout: word(1) = execution model, word(2) = entry point <id>, word(3)... = literal name */
            auto entrypointName = (char const *)&insn.word(3);
            auto entrypointStageBits = 1u << insn.word(1);

            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
                return insn;
            }
        }
    }

    return src->end();
}

bool shader_is_spirv(VkShaderModuleCreateInfo const *pCreateInfo) {
    uint32_t *words = (uint32_t *)pCreateInfo->pCode;
    size_t sizeInWords = pCreateInfo->codeSize / sizeof(uint32_t);

    /* Just validate that the header makes sense. */
    return sizeInWords >= 5 && words[0] == spv::MagicNumber && words[1] == spv::Version;
}
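
/* The 5-word minimum corresponds to the SPIR-V module header: magic number,
 * version, generator id, id bound, and a reserved word -- which is also why
 * shader_module::begin() starts iterating at words.begin() + 5. */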

static char const *storage_class_name(unsigned sc) {
    switch (sc) {
    case spv::StorageClassInput:
        return "input";
    case spv::StorageClassOutput:
        return "output";
    case spv::StorageClassUniformConstant:
        return "const uniform";
    case spv::StorageClassUniform:
        return "uniform";
    case spv::StorageClassWorkgroup:
        return "workgroup local";
    case spv::StorageClassCrossWorkgroup:
        return "workgroup global";
    case spv::StorageClassPrivate:
        return "private global";
    case spv::StorageClassFunction:
        return "function";
    case spv::StorageClassGeneric:
        return "generic";
    case spv::StorageClassAtomicCounter:
        return "atomic counter";
    case spv::StorageClassImage:
        return "image";
    case spv::StorageClassPushConstant:
        return "push constant";
    default:
        return "unknown";
    }
}

/* get the value of an integral constant */
unsigned get_constant_value(shader_module const *src, unsigned id) {
    auto value = src->get_def(id);
    assert(value != src->end());

    if (value.opcode() != spv::OpConstant) {
        /* TODO: Either ensure that the specialization transform is already performed on a module we're
           considering here, OR -- specialize on the fly now.
        */
        return 1;
    }

    return value.word(3);
}
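
/* word(3) is the first literal value word of an OpConstant instruction
 * (word(1) is the result type <id>, word(2) the result <id>). Note this reads
 * only the low-order 32 bits, which is sufficient for the array-size and
 * similar integral constants this helper is used for. */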


static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeBool:
        ss << "bool";
        break;
    case spv::OpTypeInt:
        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
        break;
    case spv::OpTypeFloat:
        ss << "float" << insn.word(2);
        break;
    case spv::OpTypeVector:
        ss << "vec" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeMatrix:
        ss << "mat" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeArray:
        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypePointer:
        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
        describe_type_inner(ss, src, insn.word(3));
        break;
    case spv::OpTypeStruct: {
        ss << "struct of (";
        for (unsigned i = 2; i < insn.len(); i++) {
            describe_type_inner(ss, src, insn.word(i));
            if (i == insn.len() - 1) {
                ss << ")";
            } else {
                ss << ", ";
            }
        }
        break;
    }
    case spv::OpTypeSampler:
        ss << "sampler";
        break;
    case spv::OpTypeSampledImage:
        ss << "sampler+";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeImage:
        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
        break;
    default:
        ss << "oddtype";
        break;
    }
}


static std::string describe_type(shader_module const *src, unsigned type) {
    std::ostringstream ss;
    describe_type_inner(ss, src, type);
    return ss.str();
}
1337
1338
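/* Example of the resulting strings (illustrative): for a GLSL `mat4`,
 * describe_type produces "mat4 of vec4 of float32"; for a block containing a
 * float and an ivec2 it produces "struct of (float32, vec2 of sint32)".
 * These strings appear only in validation messages. */
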
static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool b_arrayed) {
    /* walk two type trees together, and complain about differences */
    auto a_insn = a->get_def(a_type);
    auto b_insn = b->get_def(b_type);
    assert(a_insn != a->end());
    assert(b_insn != b->end());

    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
        /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
        return types_match(a, b, a_type, b_insn.word(2), false);
    }

    if (a_insn.opcode() != b_insn.opcode()) {
        return false;
    }

    switch (a_insn.opcode()) {
    /* if b_arrayed and we hit a leaf type, then we can't match -- there's nowhere for the extra OpTypeArray to be! */
    case spv::OpTypeBool:
        return !b_arrayed;
    case spv::OpTypeInt:
        /* match on width, signedness */
        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3) && !b_arrayed;
    case spv::OpTypeFloat:
        /* match on width */
        return a_insn.word(2) == b_insn.word(2) && !b_arrayed;
    case spv::OpTypeVector:
    case spv::OpTypeMatrix:
        /* match on element type, count. these all have the same layout. we don't get here if
         * b_arrayed -- that is handled above. */
        return !b_arrayed && types_match(a, b, a_insn.word(2), b_insn.word(2), b_arrayed) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeArray:
        /* match on element type, count. these all have the same layout. we don't get here if
         * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
         * not a literal within OpTypeArray */
        return !b_arrayed && types_match(a, b, a_insn.word(2), b_insn.word(2), b_arrayed) &&
               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
    case spv::OpTypeStruct:
        /* match on all element types */
        {
            if (b_arrayed) {
                /* for the purposes of matching different levels of arrayness, structs are leaves. */
                return false;
            }

            if (a_insn.len() != b_insn.len()) {
                return false; /* structs cannot match if member counts differ */
            }

            for (unsigned i = 2; i < a_insn.len(); i++) {
                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), b_arrayed)) {
                    return false;
                }
            }

            return true;
        }
    case spv::OpTypePointer:
        /* match on pointee type. storage class is expected to differ */
        return types_match(a, b, a_insn.word(3), b_insn.word(3), b_arrayed);

    default:
        /* remaining types are CLisms, or may not appear in the interfaces we
         * are interested in. Just claim no match.
         */
        return false;
    }
}

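/* Arrayed-interface example (illustrative): a vertex shader output declared
 * `out vec3 n;` feeding a geometry shader input declared `in vec3 n[];` is
 * compared with b_arrayed == true. The extra OpTypeArray level on the
 * consumer side is stripped by the early-out above and the underlying vec3
 * types then compare equal; without b_arrayed the pair would be reported as
 * a mismatch. */
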
static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
    auto it = map.find(id);
    if (it == map.end())
        return def;
    else
        return it->second;
}

static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypePointer:
        /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
         * we're never actually passing pointers around. */
        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
    case spv::OpTypeArray:
        if (strip_array_level) {
            return get_locations_consumed_by_type(src, insn.word(2), false);
        } else {
            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
        }
    case spv::OpTypeMatrix:
        /* num locations is the dimension * element size */
        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
    default:
        /* everything else is just 1. */
        return 1;

        /* TODO: extend to handle 64-bit scalar types, whose vectors may need
         * multiple locations. */
    }
}

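/* Location arithmetic examples (derived from the rules above): a mat4
 * consumes 4 locations (4 columns, one location per vec4 column); a float[3]
 * consumes 3 locations, unless strip_array_level is set -- used for arrayed
 * per-vertex interfaces -- in which case the outer array level is ignored
 * and it consumes 1. */
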
typedef std::pair<unsigned, unsigned> location_t;
typedef std::pair<unsigned, unsigned> descriptor_slot_t;

struct interface_var {
    uint32_t id;
    uint32_t type_id;
    uint32_t offset;
    /* TODO: collect the name, too? Isn't required to be present. */
};

static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
    while (true) {
        if (def.opcode() == spv::OpTypePointer) {
            def = src->get_def(def.word(3));
        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
            def = src->get_def(def.word(2));
            is_array_of_verts = false;
        } else if (def.opcode() == spv::OpTypeStruct) {
            return def;
        } else {
            return src->end();
        }
    }
}

static void collect_interface_block_members(layer_data *my_data, VkDevice dev, shader_module const *src,
                                            std::map<location_t, interface_var> &out,
                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
                                            uint32_t id, uint32_t type_id) {
    /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts);
    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
        /* this isn't an interface block. */
        return;
    }

    std::unordered_map<unsigned, unsigned> member_components;

    /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            unsigned member_index = insn.word(2);

            if (insn.word(3) == spv::DecorationComponent) {
                unsigned component = insn.word(4);
                member_components[member_index] = component;
            }
        }
    }

    /* Second pass -- produce the output, from Location decorations */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            unsigned member_index = insn.word(2);
            unsigned member_type_id = type.word(2 + member_index);

            if (insn.word(3) == spv::DecorationLocation) {
                unsigned location = insn.word(4);
                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
                auto component_it = member_components.find(member_index);
                unsigned component = component_it == member_components.end() ? 0 : component_it->second;

                for (unsigned int offset = 0; offset < num_locations; offset++) {
                    interface_var v;
                    v.id = id;
                    /* TODO: member index in interface_var too? */
                    v.type_id = member_type_id;
                    v.offset = offset;
                    out[std::make_pair(location + offset, component)] = v;
                }
            }
        }
    }
}

static void collect_interface_by_location(layer_data *my_data, VkDevice dev, shader_module const *src, spirv_inst_iter entrypoint,
                                          spv::StorageClass sinterface, std::map<location_t, interface_var> &out,
                                          bool is_array_of_verts) {
    std::unordered_map<unsigned, unsigned> var_locations;
    std::unordered_map<unsigned, unsigned> var_builtins;
    std::unordered_map<unsigned, unsigned> var_components;
    std::unordered_map<unsigned, unsigned> blocks;

    for (auto insn : *src) {
        /* We consider two interface models: SSO rendezvous-by-location, and
         * builtins. Complain about anything that fits neither model.
         */
        if (insn.opcode() == spv::OpDecorate) {
            if (insn.word(2) == spv::DecorationLocation) {
                var_locations[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBuiltIn) {
                var_builtins[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationComponent) {
                var_components[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBlock) {
                blocks[insn.word(1)] = 1;
            }
        }
    }

    /* TODO: handle grouped decorations */
    /* TODO: handle index=1 dual source outputs from FS -- two vars will
     * have the same location, and we don't want to clobber. */

    /* Find the end of the entrypoint's name string. Additional zero bytes follow the actual null
       terminator, to fill out the rest of the word -- so we only need to look at the last byte in
       the word to determine which word contains the terminator. */
    auto word = 3;
    while (entrypoint.word(word) & 0xff000000u) {
        ++word;
    }
    ++word;

    for (; word < entrypoint.len(); word++) {
        auto insn = src->get_def(entrypoint.word(word));
        assert(insn != src->end());
        assert(insn.opcode() == spv::OpVariable);

        if (insn.word(3) == sinterface) {
            unsigned id = insn.word(2);
            unsigned type = insn.word(1);

            int location = value_or_default(var_locations, id, -1);
            int builtin = value_or_default(var_builtins, id, -1);
            unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK, defaults to 0 */

            /* All variables and interface block members in the Input or Output storage classes
             * must be decorated with either a builtin or an explicit location.
             *
             * TODO: integrate the interface block support here. For now, don't complain --
             * a valid SPIRV module will only hit this path for the interface block case, as the
             * individual members of the type are decorated, rather than variable declarations.
             */

            if (location != -1) {
                /* A user-defined interface variable, with a location. Where a variable
                 * occupies multiple locations, emit one result for each. */
                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts);
                for (unsigned int offset = 0; offset < num_locations; offset++) {
                    interface_var v;
                    v.id = id;
                    v.type_id = type;
                    v.offset = offset;
                    out[std::make_pair(location + offset, component)] = v;
                }
            } else if (builtin == -1) {
                /* An interface block instance */
                collect_interface_block_members(my_data, dev, src, out, blocks, is_array_of_verts, id, type);
            }
        }
    }
}

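/* Decoration example (illustrative): the GLSL declaration
 *
 *     layout(location = 2, component = 1) in float f;
 *
 * appears in SPIR-V as
 *
 *     OpDecorate %f Location 2
 *     OpDecorate %f Component 1
 *
 * so the scan above records var_locations[%f] = 2 and var_components[%f] = 1,
 * and the variable is keyed in `out` as location_t(2, 1). */
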
static void collect_interface_by_descriptor_slot(layer_data *my_data, VkDevice dev, shader_module const *src,
                                                 std::unordered_set<uint32_t> const &accessible_ids,
                                                 std::map<descriptor_slot_t, interface_var> &out) {
    std::unordered_map<unsigned, unsigned> var_sets;
    std::unordered_map<unsigned, unsigned> var_bindings;

    for (auto insn : *src) {
        /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
         * DecorationDescriptorSet and DecorationBinding.
         */
        if (insn.opcode() == spv::OpDecorate) {
            if (insn.word(2) == spv::DecorationDescriptorSet) {
                var_sets[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBinding) {
                var_bindings[insn.word(1)] = insn.word(3);
            }
        }
    }

    for (auto id : accessible_ids) {
        auto insn = src->get_def(id);
        assert(insn != src->end());

        if (insn.opcode() == spv::OpVariable &&
            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
            unsigned set = value_or_default(var_sets, insn.word(2), 0);
            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);

            auto existing_it = out.find(std::make_pair(set, binding));
            if (existing_it != out.end()) {
                /* conflict within spv image */
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                        __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
                        "var %d (type %d) in %s interface in descriptor slot (%u,%u) conflicts with existing definition",
                        insn.word(2), insn.word(1), storage_class_name(insn.word(3)), existing_it->first.first,
                        existing_it->first.second);
            }

            interface_var v;
            v.id = insn.word(2);
            v.type_id = insn.word(1);
            out[std::make_pair(set, binding)] = v;
        }
    }
}

static bool validate_interface_between_stages(layer_data *my_data, VkDevice dev, shader_module const *producer,
                                              spirv_inst_iter producer_entrypoint, char const *producer_name,
                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
                                              char const *consumer_name, bool consumer_arrayed_input) {
    std::map<location_t, interface_var> outputs;
    std::map<location_t, interface_var> inputs;

    bool pass = true;

    collect_interface_by_location(my_data, dev, producer, producer_entrypoint, spv::StorageClassOutput, outputs, false);
    collect_interface_by_location(my_data, dev, consumer, consumer_entrypoint, spv::StorageClassInput, inputs,
                                  consumer_arrayed_input);

    auto a_it = outputs.begin();
    auto b_it = inputs.begin();

    /* maps sorted by key (location); walk them together to find mismatches */
    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() > 0 && b_it != inputs.end())) {
        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;

        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        /*dev*/ 0, __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
                        "%s writes to output location %u.%u which is not consumed by %s", producer_name, a_first.first,
                        a_first.second, consumer_name)) {
                pass = false;
            }
            a_it++;
        } else if (a_at_end || a_first > b_first) {
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
                        "%s consumes input location %u.%u which is not written by %s", consumer_name, b_first.first, b_first.second,
                        producer_name)) {
                pass = false;
            }
            b_it++;
        } else {
            if (types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id, consumer_arrayed_input)) {
                /* OK! */
            } else {
                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
                            a_first.first, a_first.second,
                            describe_type(producer, a_it->second.type_id).c_str(),
                            describe_type(consumer, b_it->second.type_id).c_str())) {
                    pass = false;
                }
            }
            a_it++;
            b_it++;
        }
    }

    return pass;
}

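/* Merge-walk example (illustrative): if the producer writes locations {0, 1}
 * and the consumer reads locations {1, 2}, the walk above emits a performance
 * warning for location 0 (written but never consumed), runs the type check
 * for location 1, and emits an error for location 2 (consumed but never
 * written). */
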
enum FORMAT_TYPE {
    FORMAT_TYPE_UNDEFINED,
    FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
    FORMAT_TYPE_SINT,
    FORMAT_TYPE_UINT,
};

static unsigned get_format_type(VkFormat fmt) {
    switch (fmt) {
    case VK_FORMAT_UNDEFINED:
        return FORMAT_TYPE_UNDEFINED;
    case VK_FORMAT_R8_SINT:
    case VK_FORMAT_R8G8_SINT:
    case VK_FORMAT_R8G8B8_SINT:
    case VK_FORMAT_R8G8B8A8_SINT:
    case VK_FORMAT_R16_SINT:
    case VK_FORMAT_R16G16_SINT:
    case VK_FORMAT_R16G16B16_SINT:
    case VK_FORMAT_R16G16B16A16_SINT:
    case VK_FORMAT_R32_SINT:
    case VK_FORMAT_R32G32_SINT:
    case VK_FORMAT_R32G32B32_SINT:
    case VK_FORMAT_R32G32B32A32_SINT:
    case VK_FORMAT_B8G8R8_SINT:
    case VK_FORMAT_B8G8R8A8_SINT:
    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
        return FORMAT_TYPE_SINT;
    case VK_FORMAT_R8_UINT:
    case VK_FORMAT_R8G8_UINT:
    case VK_FORMAT_R8G8B8_UINT:
    case VK_FORMAT_R8G8B8A8_UINT:
    case VK_FORMAT_R16_UINT:
    case VK_FORMAT_R16G16_UINT:
    case VK_FORMAT_R16G16B16_UINT:
    case VK_FORMAT_R16G16B16A16_UINT:
    case VK_FORMAT_R32_UINT:
    case VK_FORMAT_R32G32_UINT:
    case VK_FORMAT_R32G32B32_UINT:
    case VK_FORMAT_R32G32B32A32_UINT:
    case VK_FORMAT_B8G8R8_UINT:
    case VK_FORMAT_B8G8R8A8_UINT:
    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
        return FORMAT_TYPE_UINT;
    default:
        return FORMAT_TYPE_FLOAT;
    }
}

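/* Classification examples (follow directly from the switch above):
 * VK_FORMAT_R8G8B8A8_UNORM and VK_FORMAT_R32G32_SFLOAT fall through to
 * FORMAT_TYPE_FLOAT; VK_FORMAT_R32G32B32A32_SINT yields FORMAT_TYPE_SINT;
 * VK_FORMAT_R8_UINT yields FORMAT_TYPE_UINT. */
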
/* characterizes a SPIR-V type appearing in an interface to a fixed-function
 * stage, for comparison to a VkFormat's characterization above. */
static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeInt:
        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
    case spv::OpTypeFloat:
        return FORMAT_TYPE_FLOAT;
    case spv::OpTypeVector:
        return get_fundamental_type(src, insn.word(2));
    case spv::OpTypeMatrix:
        return get_fundamental_type(src, insn.word(2));
    case spv::OpTypeArray:
        return get_fundamental_type(src, insn.word(2));
    case spv::OpTypePointer:
        return get_fundamental_type(src, insn.word(3));
    default:
        return FORMAT_TYPE_UNDEFINED;
    }
}

static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
    uint32_t bit_pos = u_ffs(stage);
    return bit_pos - 1;
}

static bool validate_vi_consistency(layer_data *my_data, VkDevice dev, VkPipelineVertexInputStateCreateInfo const *vi) {
    /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
     * each binding should be specified only once.
     */
    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
    bool pass = true;

    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
        auto desc = &vi->pVertexBindingDescriptions[i];
        auto &binding = bindings[desc->binding];
        if (binding) {
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
                pass = false;
            }
        } else {
            binding = desc;
        }
    }

    return pass;
}

static bool validate_vi_against_vs_inputs(layer_data *my_data, VkDevice dev, VkPipelineVertexInputStateCreateInfo const *vi,
                                          shader_module const *vs, spirv_inst_iter entrypoint) {
    std::map<location_t, interface_var> inputs;
    bool pass = true;

    collect_interface_by_location(my_data, dev, vs, entrypoint, spv::StorageClassInput, inputs, false);

    /* Build index by location */
    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
    if (vi) {
        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++)
            attribs[vi->pVertexAttributeDescriptions[i].location] = &vi->pVertexAttributeDescriptions[i];
    }

    auto it_a = attribs.begin();
    auto it_b = inputs.begin();

    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
        auto a_first = a_at_end ? 0 : it_a->first;
        auto b_first = b_at_end ? 0 : it_b->first.first;
        if (!a_at_end && (b_at_end || a_first < b_first)) {
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        /*dev*/ 0, __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
                        "Vertex attribute at location %d not consumed by VS", a_first)) {
                pass = false;
            }
            it_a++;
        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
                        "VS consumes input at location %d which is not provided", b_first)) {
                pass = false;
            }
            it_b++;
        } else {
            unsigned attrib_type = get_format_type(it_a->second->format);
            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);

            /* type checking */
            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
                            "Attribute type of `%s` at location %d does not match VS input type of `%s`",
                            string_VkFormat(it_a->second->format), a_first,
                            describe_type(vs, it_b->second.type_id).c_str())) {
                    pass = false;
                }
            }

            /* OK! */
            it_a++;
            it_b++;
        }
    }

    return pass;
}

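/* Type-check example (illustrative): an attribute with format
 * VK_FORMAT_R32G32B32A32_SFLOAT classifies as FORMAT_TYPE_FLOAT, so a VS
 * input declared `layout(location = 0) in ivec4 v;` (FORMAT_TYPE_SINT)
 * triggers the interface-type-mismatch error above even though the widths
 * agree. */
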
static bool validate_fs_outputs_against_render_pass(layer_data *my_data, VkDevice dev, shader_module const *fs,
                                                    spirv_inst_iter entrypoint, RENDER_PASS_NODE const *rp, uint32_t subpass) {
    const std::vector<VkFormat> &color_formats = rp->subpassColorFormats[subpass];
    std::map<location_t, interface_var> outputs;
    bool pass = true;

    /* TODO: dual source blend index (spv::DecIndex, zero if not provided) */

    collect_interface_by_location(my_data, dev, fs, entrypoint, spv::StorageClassOutput, outputs, false);

    auto it = outputs.begin();
    uint32_t attachment = 0;

    /* Walk attachment list and outputs together -- this is a little overpowered since attachments
     * are currently dense, but the parallel with matching between shader stages is nice.
     */

    while ((outputs.size() > 0 && it != outputs.end()) || attachment < color_formats.size()) {
        if (attachment == color_formats.size() || (it != outputs.end() && it->first.first < attachment)) {
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
                        "FS writes to output location %d with no matching attachment", it->first.first)) {
                pass = false;
            }
            it++;
        } else if (it == outputs.end() || it->first.first > attachment) {
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by FS", attachment)) {
                pass = false;
            }
            attachment++;
        } else {
            unsigned output_type = get_fundamental_type(fs, it->second.type_id);
            unsigned att_type = get_format_type(color_formats[attachment]);

            /* type checking */
            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
                            "Attachment %d of type `%s` does not match FS output type of `%s`", attachment,
                            string_VkFormat(color_formats[attachment]),
                            describe_type(fs, it->second.type_id).c_str())) {
                    pass = false;
                }
            }

            /* OK! */
            it++;
            attachment++;
        }
    }

    return pass;
}

/* For some analyses, we need to know about all ids referenced by the static call tree of a particular
 * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint,
 * for example.
 * Note: we only explore parts of the image which might actually contain ids we care about for the above analyses.
 *  - NOT the shader input/output interfaces.
 *
 * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
 * converting parts of this to be generated from the machine-readable spec instead.
 */
static void mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint, std::unordered_set<uint32_t> &ids) {
    std::unordered_set<uint32_t> worklist;
    worklist.insert(entrypoint.word(2));

    while (!worklist.empty()) {
        auto id_iter = worklist.begin();
        auto id = *id_iter;
        worklist.erase(id_iter);

        auto insn = src->get_def(id);
        if (insn == src->end()) {
            /* id is something we didn't collect in build_def_index. that's OK -- we'll stumble
             * across all kinds of things here that we may not care about. */
            continue;
        }

        /* try to add to the output set */
        if (!ids.insert(id).second) {
            continue; /* if we already saw this id, we don't want to walk it again. */
        }

        switch (insn.opcode()) {
        case spv::OpFunction:
            /* scan whole body of the function, enlisting anything interesting */
            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
                switch (insn.opcode()) {
                case spv::OpLoad:
                case spv::OpAtomicLoad:
                case spv::OpAtomicExchange:
                case spv::OpAtomicCompareExchange:
                case spv::OpAtomicCompareExchangeWeak:
                case spv::OpAtomicIIncrement:
                case spv::OpAtomicIDecrement:
                case spv::OpAtomicIAdd:
                case spv::OpAtomicISub:
                case spv::OpAtomicSMin:
                case spv::OpAtomicUMin:
                case spv::OpAtomicSMax:
                case spv::OpAtomicUMax:
                case spv::OpAtomicAnd:
                case spv::OpAtomicOr:
                case spv::OpAtomicXor:
                    worklist.insert(insn.word(3)); /* ptr */
                    break;
                case spv::OpStore:
                case spv::OpAtomicStore:
                    worklist.insert(insn.word(1)); /* ptr */
                    break;
                case spv::OpAccessChain:
                case spv::OpInBoundsAccessChain:
                    worklist.insert(insn.word(3)); /* base ptr */
                    break;
                case spv::OpSampledImage:
                case spv::OpImageSampleImplicitLod:
                case spv::OpImageSampleExplicitLod:
                case spv::OpImageSampleDrefImplicitLod:
                case spv::OpImageSampleDrefExplicitLod:
                case spv::OpImageSampleProjImplicitLod:
                case spv::OpImageSampleProjExplicitLod:
                case spv::OpImageSampleProjDrefImplicitLod:
                case spv::OpImageSampleProjDrefExplicitLod:
                case spv::OpImageFetch:
                case spv::OpImageGather:
                case spv::OpImageDrefGather:
                case spv::OpImageRead:
                case spv::OpImage:
                case spv::OpImageQueryFormat:
                case spv::OpImageQueryOrder:
                case spv::OpImageQuerySizeLod:
                case spv::OpImageQuerySize:
                case spv::OpImageQueryLod:
                case spv::OpImageQueryLevels:
                case spv::OpImageQuerySamples:
                case spv::OpImageSparseSampleImplicitLod:
                case spv::OpImageSparseSampleExplicitLod:
                case spv::OpImageSparseSampleDrefImplicitLod:
                case spv::OpImageSparseSampleDrefExplicitLod:
                case spv::OpImageSparseSampleProjImplicitLod:
                case spv::OpImageSparseSampleProjExplicitLod:
                case spv::OpImageSparseSampleProjDrefImplicitLod:
                case spv::OpImageSparseSampleProjDrefExplicitLod:
                case spv::OpImageSparseFetch:
                case spv::OpImageSparseGather:
                case spv::OpImageSparseDrefGather:
                case spv::OpImageTexelPointer:
                    worklist.insert(insn.word(3)); /* image or sampled image */
                    break;
                case spv::OpImageWrite:
                    worklist.insert(insn.word(1)); /* image -- different operand order to above */
                    break;
                case spv::OpFunctionCall:
                    for (auto i = 3; i < insn.len(); i++) {
                        worklist.insert(insn.word(i)); /* fn itself, and all args */
                    }
                    break;

                case spv::OpExtInst:
                    for (auto i = 5; i < insn.len(); i++) {
                        worklist.insert(insn.word(i)); /* operands to ext inst */
                    }
                    break;
                }
            }
            break;
        }
    }
}

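/* Traversal sketch (illustrative): the worklist starts at the entrypoint's
 * function id (entrypoint.word(2)), scans each reached OpFunction body for
 * pointer and image operands and for callees, and recurses until the set is
 * closed. collect_interface_by_descriptor_slot then uses the resulting id set
 * to consider only descriptor-backed variables actually reachable from the
 * entrypoint. */
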
struct shader_stage_attributes {
    char const *const name;
    bool arrayed_input;
};

static shader_stage_attributes shader_stage_attribs[] = {
    {"vertex shader", false},
    {"tessellation control shader", true},
    {"tessellation evaluation shader", false},
    {"geometry shader", true},
    {"fragment shader", false},
};

static bool validate_push_constant_block_against_pipeline(layer_data *my_data, VkDevice dev,
                                                          std::vector<VkPushConstantRange> const *pushConstantRanges,
                                                          shader_module const *src, spirv_inst_iter type,
                                                          VkShaderStageFlagBits stage) {
    bool pass = true;

    /* strip off ptrs etc */
    type = get_struct_type(src, type, false);
    assert(type != src->end());

    /* validate directly off the offsets. this isn't quite correct for arrays
     * and matrices, but is a good first step. TODO: arrays, matrices, weird
     * sizes */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            if (insn.word(3) == spv::DecorationOffset) {
                unsigned offset = insn.word(4);
                auto size = 4; /* bytes; TODO: calculate this based on the type */

                bool found_range = false;
                for (auto const &range : *pushConstantRanges) {
                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
                        found_range = true;

                        if ((range.stageFlags & stage) == 0) {
                            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                        /* dev */ 0, __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
                                        "Push constant range covering variable starting at "
                                        "offset %u not accessible from stage %s",
                                        offset, string_VkShaderStageFlagBits(stage))) {
                                pass = false;
                            }
                        }

                        break;
                    }
                }

                if (!found_range) {
                    if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                /* dev */ 0, __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
                                "Push constant range covering variable starting at "
                                "offset %u not declared in layout",
                                offset)) {
                        pass = false;
                    }
                }
            }
        }
    }

    return pass;
}

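/* Range-check example (illustrative): with a pipeline layout declaring a
 * single VkPushConstantRange { VK_SHADER_STAGE_VERTEX_BIT, 0, 16 }, a block
 * member decorated Offset 16 fails the "not declared in layout" check
 * (16 + 4 > 0 + 16), while a member at Offset 0 referenced from a fragment
 * shader hits the stage-accessibility error instead. */
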
static bool validate_push_constant_usage(layer_data *my_data, VkDevice dev,
                                         std::vector<VkPushConstantRange> const *pushConstantRanges, shader_module const *src,
                                         std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
    bool pass = true;

    for (auto id : accessible_ids) {
        auto def_insn = src->get_def(id);
        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
            pass = validate_push_constant_block_against_pipeline(my_data, dev, pushConstantRanges, src,
                                                                 src->get_def(def_insn.word(1)), stage) &&
                   pass;
        }
    }

    return pass;
}

// For given pipelineLayout verify that the setLayout at slot.first
//  has the requested binding at slot.second
static VkDescriptorSetLayoutBinding const *get_descriptor_binding(layer_data *my_data,
                                                                  vector<VkDescriptorSetLayout> *pipelineLayout,
                                                                  descriptor_slot_t slot) {
    if (!pipelineLayout)
        return nullptr;

    if (slot.first >= pipelineLayout->size())
        return nullptr;

    auto const layout_node = my_data->descriptorSetLayoutMap[(*pipelineLayout)[slot.first]];

    auto bindingIt = layout_node->bindingToIndexMap.find(slot.second);
    if ((bindingIt == layout_node->bindingToIndexMap.end()) || (layout_node->createInfo.pBindings == NULL))
        return nullptr;

    assert(bindingIt->second < layout_node->createInfo.bindingCount);
    return &layout_node->createInfo.pBindings[bindingIt->second];
}

// Block of code at start here for managing/tracking Pipeline state that this layer cares about

static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};

// TODO : Should be tracking lastBound per commandBuffer and when draws occur, report based on that cmd buffer lastBound
//  Then need to synchronize the accesses based on cmd buffer so that if I'm reading state on one cmd buffer, updates
//  to that same cmd buffer by separate thread are not changing state from underneath us
// Track the last cmd buffer touched by this thread

static VkBool32 hasDrawCmd(GLOBAL_CB_NODE *pCB) {
    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
        if (pCB->drawCount[i])
            return VK_TRUE;
    }
    return VK_FALSE;
}

// Check object status for selected flag state
static VkBool32 validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags enable_mask, CBStatusFlags status_mask,
                                CBStatusFlags status_flag, VkFlags msg_flags, DRAW_STATE_ERROR error_code, const char *fail_msg) {
    // If a non-zero enable mask is present, check it against status; but if enable_mask
    //  is 0 then no enable is required, so we should always just check status
    if ((!enable_mask) || (enable_mask & pNode->status)) {
        if ((pNode->status & status_mask) != status_flag) {
            // TODO : How to pass dispatchable objects as srcObject? Here src obj should be cmd buffer
            return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, error_code,
                           "DS", "CB object %#" PRIxLEAST64 ": %s", (uint64_t)(pNode->commandBuffer), fail_msg);
        }
    }
    return VK_FALSE;
}

// Retrieve pipeline node ptr for given pipeline object
static PIPELINE_NODE *getPipeline(layer_data *my_data, const VkPipeline pipeline) {
    if (my_data->pipelineMap.find(pipeline) == my_data->pipelineMap.end()) {
        return NULL;
    }
    return my_data->pipelineMap[pipeline];
}

// Return VK_TRUE if, for the given PSO, the given state enum is dynamic; else return VK_FALSE
static VkBool32 isDynamic(const PIPELINE_NODE *pPipeline, const VkDynamicState state) {
    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
                return VK_TRUE;
        }
    }
    return VK_FALSE;
}

// Validate state stored as flags at time of draw call
static VkBool32 validate_draw_state_flags(layer_data *my_data, GLOBAL_CB_NODE *pCB, VkBool32 indexedDraw) {
    VkBool32 result;
    result =
        validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_VIEWPORT_SET, CBSTATUS_VIEWPORT_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                        DRAWSTATE_VIEWPORT_NOT_BOUND, "Dynamic viewport state not set for this command buffer");
    result |=
        validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_SCISSOR_SET, CBSTATUS_SCISSOR_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                        DRAWSTATE_SCISSOR_NOT_BOUND, "Dynamic scissor state not set for this command buffer");
    result |= validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_LINE_WIDTH_SET, CBSTATUS_LINE_WIDTH_SET,
                              VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_LINE_WIDTH_NOT_BOUND,
                              "Dynamic line width state not set for this command buffer");
    result |= validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_DEPTH_BIAS_SET, CBSTATUS_DEPTH_BIAS_SET,
                              VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_DEPTH_BIAS_NOT_BOUND,
                              "Dynamic depth bias state not set for this command buffer");
    result |= validate_status(my_data, pCB, CBSTATUS_COLOR_BLEND_WRITE_ENABLE, CBSTATUS_BLEND_SET, CBSTATUS_BLEND_SET,
                              VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_BLEND_NOT_BOUND,
                              "Dynamic blend object state not set for this command buffer");
    result |= validate_status(my_data, pCB, CBSTATUS_DEPTH_WRITE_ENABLE, CBSTATUS_DEPTH_BOUNDS_SET, CBSTATUS_DEPTH_BOUNDS_SET,
                              VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND,
                              "Dynamic depth bounds state not set for this command buffer");
    result |= validate_status(my_data, pCB, CBSTATUS_STENCIL_TEST_ENABLE, CBSTATUS_STENCIL_READ_MASK_SET,
                              CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_STENCIL_NOT_BOUND,
                              "Dynamic stencil read mask state not set for this command buffer");
    result |= validate_status(my_data, pCB, CBSTATUS_STENCIL_TEST_ENABLE, CBSTATUS_STENCIL_WRITE_MASK_SET,
                              CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_STENCIL_NOT_BOUND,
                              "Dynamic stencil write mask state not set for this command buffer");
    result |= validate_status(my_data, pCB, CBSTATUS_STENCIL_TEST_ENABLE, CBSTATUS_STENCIL_REFERENCE_SET,
                              CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_STENCIL_NOT_BOUND,
                              "Dynamic stencil reference state not set for this command buffer");
    if (indexedDraw)
        result |= validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_INDEX_BUFFER_BOUND, CBSTATUS_INDEX_BUFFER_BOUND,
                                  VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted");
    return result;
}

// Verify attachment reference compatibility according to spec
//  If one array is larger, treat missing elements of the shorter array as VK_ATTACHMENT_UNUSED; the other array must match this
//  If both AttachmentReference arrays have the requested index, check their corresponding AttachmentDescriptions
//   to make sure that format and sample counts match.
//  If not, they are not compatible.
static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
                                             const VkAttachmentDescription *pSecondaryAttachments) {
    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
        if (VK_ATTACHMENT_UNUSED != pSecondary[index].attachment)
            return false;
    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
        if (VK_ATTACHMENT_UNUSED != pPrimary[index].attachment)
            return false;
    } else { // Format and sample count must match
        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
             pSecondaryAttachments[pSecondary[index].attachment].format) &&
            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
             pSecondaryAttachments[pSecondary[index].attachment].samples))
            return true;
    }
    // Format and sample counts didn't match
    return false;
}

// For the given primary and secondary RenderPass objects, verify that they're compatible
static bool verify_renderpass_compatibility(layer_data *my_data, const VkRenderPass primaryRP, const VkRenderPass secondaryRP,
                                            string &errorMsg) {
    stringstream errorStr;
    if (my_data->renderPassMap.find(primaryRP) == my_data->renderPassMap.end()) {
        errorStr << "invalid VkRenderPass (" << primaryRP << ")";
        errorMsg = errorStr.str();
        return false;
    } else if (my_data->renderPassMap.find(secondaryRP) == my_data->renderPassMap.end()) {
        errorStr << "invalid VkRenderPass (" << secondaryRP << ")";
        errorMsg = errorStr.str();
        return false;
    }
    // Trivial pass case is exact same RP
    if (primaryRP == secondaryRP) {
        return true;
    }
    const VkRenderPassCreateInfo *primaryRPCI = my_data->renderPassMap[primaryRP]->pCreateInfo;
    const VkRenderPassCreateInfo *secondaryRPCI = my_data->renderPassMap[secondaryRP]->pCreateInfo;
    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
        errorMsg = errorStr.str();
        return false;
    }
    uint32_t spIndex = 0;
    for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
                                                         primaryColorCount, primaryRPCI->pAttachments,
                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
                                                         primaryColorCount, primaryRPCI->pAttachments,
                                                         secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
                errorStr << "depth/stencil attachments at index " << cIdx << " of subpass index " << spIndex
                         << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            }
        }
        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
        for (uint32_t i = 0; i < inputMax; ++i) {
            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryColorCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            }
        }
    }
    return true;
}

// For the given SET_NODE, verify that its Set is compatible w/ the setLayout corresponding to pipelineLayout[layoutIndex]
static bool verify_set_layout_compatibility(layer_data *my_data, const SET_NODE *pSet, const VkPipelineLayout layout,
                                            const uint32_t layoutIndex, string &errorMsg) {
    stringstream errorStr;
    auto pipeline_layout_it = my_data->pipelineLayoutMap.find(layout);
    if (pipeline_layout_it == my_data->pipelineLayoutMap.end()) {
        errorStr << "invalid VkPipelineLayout (" << layout << ")";
        errorMsg = errorStr.str();
        return false;
    }
    if (layoutIndex >= pipeline_layout_it->second.descriptorSetLayouts.size()) {
        errorStr << "VkPipelineLayout (" << layout << ") only contains " << pipeline_layout_it->second.descriptorSetLayouts.size()
                 << " setLayouts corresponding to sets 0-" << pipeline_layout_it->second.descriptorSetLayouts.size() - 1
                 << ", but you're attempting to bind set to index " << layoutIndex;
        errorMsg = errorStr.str();
        return false;
    }
    // Get the specific setLayout from PipelineLayout that overlaps this set
    LAYOUT_NODE *pLayoutNode = my_data->descriptorSetLayoutMap[pipeline_layout_it->second.descriptorSetLayouts[layoutIndex]];
    if (pLayoutNode->layout == pSet->pLayout->layout) { // trivial pass case
        return true;
    }
    size_t descriptorCount = pLayoutNode->descriptorTypes.size();
    if (descriptorCount != pSet->pLayout->descriptorTypes.size()) {
        errorStr << "setLayout " << layoutIndex << " from pipelineLayout " << layout << " has " << descriptorCount
                 << " descriptors, but corresponding set being bound has " << pSet->pLayout->descriptorTypes.size()
                 << " descriptors.";
        errorMsg = errorStr.str();
        return false; // trivial fail case
    }
    // Now need to check set against corresponding pipelineLayout to verify compatibility
    for (size_t i = 0; i < descriptorCount; ++i) {
        // Need to verify that layouts are identically defined
        //  TODO : Is below sufficient? Making sure that types & stageFlags match per descriptor
        //   do we also need to check immutable samplers?
        if (pLayoutNode->descriptorTypes[i] != pSet->pLayout->descriptorTypes[i]) {
            errorStr << "descriptor " << i << " for descriptorSet being bound is type '"
                     << string_VkDescriptorType(pSet->pLayout->descriptorTypes[i])
                     << "' but corresponding descriptor from pipelineLayout is type '"
                     << string_VkDescriptorType(pLayoutNode->descriptorTypes[i]) << "'";
            errorMsg = errorStr.str();
            return false;
        }
        if (pLayoutNode->stageFlags[i] != pSet->pLayout->stageFlags[i]) {
            errorStr << "stageFlags " << i << " for descriptorSet being bound is " << pSet->pLayout->stageFlags[i]
                     << " but corresponding descriptor from pipelineLayout has stageFlags " << pLayoutNode->stageFlags[i];
            errorMsg = errorStr.str();
            return false;
        }
    }
    return true;
}

// Validate that data for each specialization entry is fully contained within the buffer.
static VkBool32 validate_specialization_offsets(layer_data *my_data, VkPipelineShaderStageCreateInfo const *info) {
    VkBool32 pass = VK_TRUE;

    VkSpecializationInfo const *spec = info->pSpecializationInfo;

    if (spec) {
        for (auto i = 0u; i < spec->mapEntryCount; i++) {
            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
                            "Specialization entry %u (for constant id %u) references memory outside provided "
                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
                            " bytes provided)",
                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
                            (size_t)(spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1), spec->dataSize)) {
                    pass = VK_FALSE;
                }
            }
        }
    }

    return pass;
}

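/* Bounds example (illustrative): with dataSize == 8, a map entry
 * { constantID = 0, offset = 4, size = 8 } is rejected, since 4 + 8 > 8
 * means the entry would read past the end of pData. */
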
static bool descriptor_type_match(layer_data *my_data, shader_module const *module, uint32_t type_id,
                                  VkDescriptorType descriptor_type, unsigned &descriptor_count) {
    auto type = module->get_def(type_id);

    descriptor_count = 1;

    /* Strip off any array or ptrs. Where we remove array levels, adjust the
     * descriptor count for each dimension. */
    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
        if (type.opcode() == spv::OpTypeArray) {
            descriptor_count *= get_constant_value(module, type.word(3));
            type = module->get_def(type.word(2));
        } else {
            type = module->get_def(type.word(3));
        }
    }

    switch (type.opcode()) {
    case spv::OpTypeStruct: {
        for (auto insn : *module) {
            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
                if (insn.word(2) == spv::DecorationBlock) {
                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
                } else if (insn.word(2) == spv::DecorationBufferBlock) {
                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
                }
            }
        }

        /* Invalid */
        return false;
    }

    case spv::OpTypeSampler:
        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER;

    case spv::OpTypeSampledImage:
        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;

    case spv::OpTypeImage: {
        /* Many descriptor types can back image types -- the right one depends
         * on the dimension and on whether the image will be used with a
         * sampler. SPIR-V for Vulkan requires that sampled be 1 or 2 --
         * leaving the decision to runtime is unacceptable.
         */
        auto dim = type.word(3);
        auto sampled = type.word(7);

        if (dim == spv::DimSubpassData) {
            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
        } else if (dim == spv::DimBuffer) {
            if (sampled == 1) {
                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
            } else {
                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
            }
        } else if (sampled == 1) {
            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
        } else {
            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
        }
    }

    /* We shouldn't really see any other junk types -- but if we do, they're
     * a mismatch.
     */
    default:
        return false; /* Mismatch */
    }
}

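/* Matching examples (illustrative): a struct decorated Block matches
 * VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER (or its dynamic variant); a struct
 * decorated BufferBlock matches VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; an
 * OpTypeImage with Dim == SubpassData only matches
 * VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT. For an array such as
 * `uniform sampler2D s[4];`, the loop above leaves descriptor_count == 4
 * for the caller to compare against the binding's descriptorCount. */
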
static VkBool32 require_feature(layer_data *my_data, VkBool32 feature, char const *feature_name) {
    if (!feature) {
        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                    /* dev */ 0, __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
                    "Shader requires VkPhysicalDeviceFeatures::%s but it is not "
                    "enabled on the device",
                    feature_name)) {
            return false;
        }
    }

    return true;
}

static VkBool32 validate_shader_capabilities(layer_data *my_data, VkDevice dev, shader_module const *src)
{
    VkBool32 pass = VK_TRUE;

    auto enabledFeatures = &my_data->physDevProperties.features;

    for (auto insn : *src) {
        if (insn.opcode() == spv::OpCapability) {
            switch (insn.word(1)) {
            case spv::CapabilityMatrix:
            case spv::CapabilityShader:
            case spv::CapabilityInputAttachment:
            case spv::CapabilitySampled1D:
            case spv::CapabilityImage1D:
            case spv::CapabilitySampledBuffer:
            case spv::CapabilityImageBuffer:
            case spv::CapabilityImageQuery:
            case spv::CapabilityDerivativeControl:
                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
                break;

            case spv::CapabilityGeometry:
                pass &= require_feature(my_data, enabledFeatures->geometryShader, "geometryShader");
                break;

            case spv::CapabilityTessellation:
                pass &= require_feature(my_data, enabledFeatures->tessellationShader, "tessellationShader");
                break;

            case spv::CapabilityFloat64:
                pass &= require_feature(my_data, enabledFeatures->shaderFloat64, "shaderFloat64");
                break;

            case spv::CapabilityInt64:
                pass &= require_feature(my_data, enabledFeatures->shaderInt64, "shaderInt64");
                break;

            case spv::CapabilityTessellationPointSize:
            case spv::CapabilityGeometryPointSize:
                pass &= require_feature(my_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
                                        "shaderTessellationAndGeometryPointSize");
                break;

            case spv::CapabilityImageGatherExtended:
                pass &= require_feature(my_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
                break;

            case spv::CapabilityStorageImageMultisample:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
                break;

            case spv::CapabilityUniformBufferArrayDynamicIndexing:
                pass &= require_feature(my_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
                                        "shaderUniformBufferArrayDynamicIndexing");
                break;

            case spv::CapabilitySampledImageArrayDynamicIndexing:
                pass &= require_feature(my_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
                                        "shaderSampledImageArrayDynamicIndexing");
                break;

            case spv::CapabilityStorageBufferArrayDynamicIndexing:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
                                        "shaderStorageBufferArrayDynamicIndexing");
                break;

            case spv::CapabilityStorageImageArrayDynamicIndexing:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
                                        "shaderStorageImageArrayDynamicIndexing");
                break;

            case spv::CapabilityClipDistance:
                pass &= require_feature(my_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
                break;

            case spv::CapabilityCullDistance:
                pass &= require_feature(my_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
                break;

            case spv::CapabilityImageCubeArray:
                pass &= require_feature(my_data, enabledFeatures->imageCubeArray, "imageCubeArray");
                break;

            case spv::CapabilitySampleRateShading:
                pass &= require_feature(my_data, enabledFeatures->sampleRateShading, "sampleRateShading");
                break;

            case spv::CapabilitySparseResidency:
                pass &= require_feature(my_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
                break;

            case spv::CapabilityMinLod:
                pass &= require_feature(my_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
                break;

            case spv::CapabilitySampledCubeArray:
                pass &= require_feature(my_data, enabledFeatures->imageCubeArray, "imageCubeArray");
                break;

            case spv::CapabilityImageMSArray:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
                break;

            case spv::CapabilityStorageImageExtendedFormats:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageExtendedFormats,
                                        "shaderStorageImageExtendedFormats");
                break;

            case spv::CapabilityInterpolationFunction:
                pass &= require_feature(my_data, enabledFeatures->sampleRateShading, "sampleRateShading");
                break;

            case spv::CapabilityStorageImageReadWithoutFormat:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
                                        "shaderStorageImageReadWithoutFormat");
                break;

            case spv::CapabilityStorageImageWriteWithoutFormat:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
                                        "shaderStorageImageWriteWithoutFormat");
                break;

            case spv::CapabilityMultiViewport:
                pass &= require_feature(my_data, enabledFeatures->multiViewport, "multiViewport");
                break;

            default:
                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /* dev */ 0,
                            __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
                            "Shader declares capability %u, not supported in Vulkan.",
                            insn.word(1)))
                    pass = VK_FALSE;
                break;
            }
        }
    }

    return pass;
}
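
// Worked example (illustrative, not exhaustive): compiling a GLSL shader that
// declares `double d = ...;` makes the front end emit `OpCapability Float64`,
// so the loop above calls require_feature() with
// VkPhysicalDeviceFeatures::shaderFloat64. A sketch of the app-side fix,
// assuming the physical device actually supports the feature:
//
//     VkPhysicalDeviceFeatures supported;
//     vkGetPhysicalDeviceFeatures(physical_device, &supported);
//     VkPhysicalDeviceFeatures enabled = {};
//     enabled.shaderFloat64 = supported.shaderFloat64;  // request it at device creation
//     // ... then pass &enabled as VkDeviceCreateInfo::pEnabledFeatures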

// Validate the shaders used by the given pipeline.
// As a side effect, this function also records the descriptor sets that are actually used by the pipeline.
static VkBool32 validate_pipeline_shaders(layer_data *my_data, VkDevice dev, PIPELINE_NODE *pPipeline) {
    VkGraphicsPipelineCreateInfo const *pCreateInfo = &pPipeline->graphicsPipelineCI;
    /* We seem to allow pipeline stages to be specified out of order, so collect and identify them
     * before trying to do anything more: */
    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);

    shader_module *shaders[5];
    memset(shaders, 0, sizeof(shaders));
    spirv_inst_iter entrypoints[5];
    memset(entrypoints, 0, sizeof(entrypoints));
    RENDER_PASS_NODE const *rp = 0;
    VkPipelineVertexInputStateCreateInfo const *vi = 0;
    VkBool32 pass = VK_TRUE;

    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
        VkPipelineShaderStageCreateInfo const *pStage = &pCreateInfo->pStages[i];
        if (pStage->sType == VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO) {

            if ((pStage->stage & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT |
                                  VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) == 0) {
                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            /*dev*/ 0, __LINE__, SHADER_CHECKER_UNKNOWN_STAGE, "SC", "Unknown shader stage %d", pStage->stage)) {
                    pass = VK_FALSE;
                }
            } else {
                pass = validate_specialization_offsets(my_data, pStage) && pass;

                auto stage_id = get_shader_stage_id(pStage->stage);
                auto module = my_data->shaderModuleMap[pStage->module].get();
                shaders[stage_id] = module;

                /* find the entrypoint */
                entrypoints[stage_id] = find_entrypoint(module, pStage->pName, pStage->stage);
                if (entrypoints[stage_id] == module->end()) {
                    if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                /*dev*/ 0, __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
                                "No entrypoint found named `%s` for stage %s", pStage->pName,
                                string_VkShaderStageFlagBits(pStage->stage))) {
                        pass = VK_FALSE;
                    }
                }

                /* validate shader capabilities against enabled device features */
                pass = validate_shader_capabilities(my_data, dev, module) && pass;

                /* mark accessible ids */
                std::unordered_set<uint32_t> accessible_ids;
                mark_accessible_ids(module, entrypoints[stage_id], accessible_ids);

                /* validate descriptor set layout against what the entrypoint actually uses */
                std::map<descriptor_slot_t, interface_var> descriptor_uses;
                collect_interface_by_descriptor_slot(my_data, dev, module, accessible_ids, descriptor_uses);

                auto layouts = pCreateInfo->layout != VK_NULL_HANDLE
                                   ? &(my_data->pipelineLayoutMap[pCreateInfo->layout].descriptorSetLayouts)
                                   : nullptr;

                for (auto use : descriptor_uses) {
                    // As a side-effect of this function, capture which sets are used by the pipeline
                    pPipeline->active_sets.insert(use.first.first);

                    /* find the matching binding */
                    auto binding = get_descriptor_binding(my_data, layouts, use.first);
                    unsigned required_descriptor_count;

                    if (!binding) {
                        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                    /*dev*/ 0, __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
                                    "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
                                    use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
                            pass = VK_FALSE;
                        }
                    } else if (~binding->stageFlags & pStage->stage) {
                        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                    /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
                                    "Shader uses descriptor slot %u.%u (used "
                                    "as type `%s`) but descriptor not "
                                    "accessible from stage %s",
                                    use.first.first, use.first.second,
                                    describe_type(module, use.second.type_id).c_str(),
                                    string_VkShaderStageFlagBits(pStage->stage))) {
                            pass = VK_FALSE;
                        }
                    } else if (!descriptor_type_match(my_data, module, use.second.type_id, binding->descriptorType,
                                                      /*out*/ required_descriptor_count)) {
                        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                    /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
                                    "Type mismatch on descriptor slot "
                                    "%u.%u (used as type `%s`) but "
                                    "descriptor of type %s",
                                    use.first.first, use.first.second,
                                    describe_type(module, use.second.type_id).c_str(),
                                    string_VkDescriptorType(binding->descriptorType))) {
                            pass = VK_FALSE;
                        }
                    } else if (binding->descriptorCount < required_descriptor_count) {
                        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                    /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
                                    "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
                                    required_descriptor_count, use.first.first, use.first.second,
                                    describe_type(module, use.second.type_id).c_str(),
                                    binding->descriptorCount)) {
                            pass = VK_FALSE;
                        }
                    }
                }

                /* validate push constant usage */
                pass =
                    validate_push_constant_usage(my_data, dev, &my_data->pipelineLayoutMap[pCreateInfo->layout].pushConstantRanges,
                                                 module, accessible_ids, pStage->stage) &&
                    pass;
            }
        }
    }

    if (pCreateInfo->renderPass != VK_NULL_HANDLE)
        rp = my_data->renderPassMap[pCreateInfo->renderPass];

    vi = pCreateInfo->pVertexInputState;

    if (vi) {
        pass = validate_vi_consistency(my_data, dev, vi) && pass;
    }

    if (shaders[vertex_stage]) {
        pass = validate_vi_against_vs_inputs(my_data, dev, vi, shaders[vertex_stage], entrypoints[vertex_stage]) && pass;
    }

    /* TODO: enforce rules about present combinations of shaders */
    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);

    while (!shaders[producer] && producer != fragment_stage) {
        producer++;
        consumer++;
    }

    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
        assert(shaders[producer]);
        if (shaders[consumer]) {
            pass = validate_interface_between_stages(my_data, dev, shaders[producer], entrypoints[producer],
                                                     shader_stage_attribs[producer].name, shaders[consumer], entrypoints[consumer],
                                                     shader_stage_attribs[consumer].name,
                                                     shader_stage_attribs[consumer].arrayed_input) &&
                   pass;

            producer = consumer;
        }
    }

    if (shaders[fragment_stage] && rp) {
        pass = validate_fs_outputs_against_render_pass(my_data, dev, shaders[fragment_stage], entrypoints[fragment_stage], rp,
                                                       pCreateInfo->subpass) &&
               pass;
    }

    return pass;
}
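
// Illustrative walk of the producer/consumer loop above (assuming a pipeline
// with only a VS and an FS bound): producer starts at the vertex stage and
// consumer at tessellation control. Since the TCS, TES and GS slots in
// shaders[] are null, consumer advances past them until it reaches the
// fragment stage, at which point validate_interface_between_stages() checks
// that every input the FS consumes is matched by a VS output with a
// compatible location and type.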

// Return Set node ptr for specified set or else NULL
static SET_NODE *getSetNode(layer_data *my_data, const VkDescriptorSet set) {
    if (my_data->setMap.find(set) == my_data->setMap.end()) {
        return NULL;
    }
    return my_data->setMap[set];
}
// For the given command buffer, verify that for each set in activeSetNodes
// any dynamic descriptor in that set has a valid dynamic offset bound.
// To be valid, the dynamic offset combined with the offset and range from its
// descriptor update must not overflow the size of its buffer being updated
static VkBool32 validate_dynamic_offsets(layer_data *my_data, const GLOBAL_CB_NODE *pCB, const vector<SET_NODE *> activeSetNodes) {
    VkBool32 result = VK_FALSE;

    VkWriteDescriptorSet *pWDS = NULL;
    uint32_t dynOffsetIndex = 0;
    VkDeviceSize bufferSize = 0;
    for (auto set_node : activeSetNodes) {
        for (uint32_t i = 0; i < set_node->descriptorCount; ++i) {
            // TODO: Add validation for descriptors dynamically skipped in shader
            if (set_node->ppDescriptors[i] != NULL) {
                switch (set_node->ppDescriptors[i]->sType) {
                case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
                    pWDS = (VkWriteDescriptorSet *)set_node->ppDescriptors[i];
                    if ((pWDS->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
                        (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
                        for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
                            bufferSize = my_data->bufferMap[pWDS->pBufferInfo[j].buffer].create_info->size;
                            uint32_t dynOffset = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].dynamicOffsets[dynOffsetIndex];
                            if (pWDS->pBufferInfo[j].range == VK_WHOLE_SIZE) {
                                if ((dynOffset + pWDS->pBufferInfo[j].offset) > bufferSize) {
                                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                      VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                                      reinterpret_cast<const uint64_t &>(set_node->set), __LINE__,
                                                      DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, "DS",
                                                      "VkDescriptorSet (%#" PRIxLEAST64 ") bound as set #%u has a range of "
                                                      "VK_WHOLE_SIZE, but its dynamic offset %#" PRIxLEAST32 " "
                                                      "combined with offset %#" PRIxLEAST64 " oversteps its buffer (%#" PRIxLEAST64
                                                      ") which has a size of %#" PRIxLEAST64 ".",
                                                      reinterpret_cast<const uint64_t &>(set_node->set), i,
                                                      dynOffset, pWDS->pBufferInfo[j].offset,
                                                      reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), bufferSize);
                                }
                            } else if ((dynOffset + pWDS->pBufferInfo[j].offset + pWDS->pBufferInfo[j].range) > bufferSize) {
                                result |= log_msg(
                                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                    reinterpret_cast<const uint64_t &>(set_node->set), __LINE__, DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW,
                                    "DS",
                                    "VkDescriptorSet (%#" PRIxLEAST64 ") bound as set #%u has dynamic offset %#" PRIxLEAST32 ". "
                                    "Combined with offset %#" PRIxLEAST64 " and range %#" PRIxLEAST64
                                    " from its update, this oversteps its buffer "
                                    "(%#" PRIxLEAST64 ") which has a size of %#" PRIxLEAST64 ".",
                                    reinterpret_cast<const uint64_t &>(set_node->set), i, dynOffset,
                                    pWDS->pBufferInfo[j].offset, pWDS->pBufferInfo[j].range,
                                    reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), bufferSize);
                            }
                            dynOffsetIndex++;
                            i += j; // Advance i to end of this set of descriptors (++i at end of for loop will move 1 index past
                                    // last of these descriptors)
                        }
                    }
                    break;
                default: // Currently only shadowing Write update nodes so shouldn't get here
                    assert(0);
                    continue;
                }
            }
        }
    }
    return result;
}
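
// Worked example of the overflow rule above (hypothetical numbers): for a
// buffer of size 1024 updated with offset 256 and range 512, a dynamic offset
// of 256 is the largest valid value (256 + 512 + 256 = 1024), while a dynamic
// offset of 512 oversteps the buffer (256 + 512 + 512 = 1280 > 1024) and
// triggers DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW. With range == VK_WHOLE_SIZE only
// offset + dynamic offset is checked, since the effective range shrinks as
// the offsets grow.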

// Validate overall state at the time of a draw call
static VkBool32 validate_draw_state(layer_data *my_data, GLOBAL_CB_NODE *pCB, VkBool32 indexedDraw) {
    // First check flag states
    VkBool32 result = validate_draw_state_flags(my_data, pCB, indexedDraw);
    PIPELINE_NODE *pPipe = getPipeline(my_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
    // Now complete other state checks
    // TODO : Currently only performing the next check if *something* was bound (non-zero last bound).
    // There is probably a better way to gate when this check happens, and to know if something *should* have been bound.
    // We should have that check separately and then gate this check based on it.
    if (pPipe) {
        auto const &state = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS];
        if (state.pipelineLayout) {
            string errorString;
            // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets
            vector<SET_NODE *> activeSetNodes;
            for (auto setIndex : pPipe->active_sets) {
                // If valid set is not bound throw an error
                if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                      __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
                                      "VkPipeline %#" PRIxLEAST64 " uses set #%u but that set is not bound.",
                                      (uint64_t)pPipe->pipeline, setIndex);
                } else if (!verify_set_layout_compatibility(my_data, my_data->setMap[state.boundDescriptorSets[setIndex]],
                                                            pPipe->graphicsPipelineCI.layout, setIndex, errorString)) {
                    // Set is bound but not compatible w/ overlapping pipelineLayout from PSO
                    VkDescriptorSet setHandle = my_data->setMap[state.boundDescriptorSets[setIndex]]->set;
                    result |= log_msg(
                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
                        "VkDescriptorSet (%#" PRIxLEAST64
                        ") bound as set #%u is not compatible with overlapping VkPipelineLayout %#" PRIxLEAST64 " due to: %s",
                        (uint64_t)setHandle, setIndex, (uint64_t)pPipe->graphicsPipelineCI.layout, errorString.c_str());
                } else { // Valid set is bound and layout compatible, validate that it's updated and verify any dynamic offsets
                    // Pull the set node
                    SET_NODE *pSet = my_data->setMap[state.boundDescriptorSets[setIndex]];
                    // Save vector of all active sets to verify dynamicOffsets below
                    activeSetNodes.push_back(pSet);
                    // Make sure set has been updated
                    if (!pSet->pUpdateStructs) {
                        result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                          VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pSet->set, __LINE__,
                                          DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                          "DS %#" PRIxLEAST64 " bound but it was never updated. It is now being used to draw so "
                                          "this will result in undefined behavior.",
                                          (uint64_t)pSet->set);
                    }
                }
            }
            // For each dynamic descriptor, make sure dynamic offset doesn't overstep buffer
            if (!state.dynamicOffsets.empty())
                result |= validate_dynamic_offsets(my_data, pCB, activeSetNodes);
        }
        // Verify Vtx binding
        if (pPipe->vertexBindingDescriptions.size() > 0) {
            for (size_t i = 0; i < pPipe->vertexBindingDescriptions.size(); i++) {
                if ((pCB->currentDrawData.buffers.size() < (i + 1)) || (pCB->currentDrawData.buffers[i] == VK_NULL_HANDLE)) {
                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                      __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
                                      "The Pipeline State Object (%#" PRIxLEAST64
                                      ") expects that this Command Buffer's vertex binding index " PRINTF_SIZE_T_SPECIFIER
                                      " be set via vkCmdBindVertexBuffers.",
                                      (uint64_t)pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline, i);
                }
            }
        } else {
            if (!pCB->currentDrawData.buffers.empty()) {
                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                  0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
                                  "Vertex buffers are bound to command buffer (%#" PRIxLEAST64
                                  ") but no vertex buffers are attached to this Pipeline State Object (%#" PRIxLEAST64 ").",
                                  (uint64_t)pCB->commandBuffer, (uint64_t)pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
            }
        }
        // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
        // Skip check if rasterization is disabled or there is no viewport.
        if ((!pPipe->graphicsPipelineCI.pRasterizationState ||
             !pPipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) &&
            pPipe->graphicsPipelineCI.pViewportState) {
            VkBool32 dynViewport = isDynamic(pPipe, VK_DYNAMIC_STATE_VIEWPORT);
            VkBool32 dynScissor = isDynamic(pPipe, VK_DYNAMIC_STATE_SCISSOR);
            if (dynViewport) {
                if (pCB->viewports.size() != pPipe->graphicsPipelineCI.pViewportState->viewportCount) {
                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                      __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                      "Dynamic viewportCount from vkCmdSetViewport() is " PRINTF_SIZE_T_SPECIFIER
                                      ", but PSO viewportCount is %u. These counts must match.",
                                      pCB->viewports.size(), pPipe->graphicsPipelineCI.pViewportState->viewportCount);
                }
            }
            if (dynScissor) {
                if (pCB->scissors.size() != pPipe->graphicsPipelineCI.pViewportState->scissorCount) {
                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                      __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                      "Dynamic scissorCount from vkCmdSetScissor() is " PRINTF_SIZE_T_SPECIFIER
                                      ", but PSO scissorCount is %u. These counts must match.",
                                      pCB->scissors.size(), pPipe->graphicsPipelineCI.pViewportState->scissorCount);
                }
            }
        }
    }
    return result;
}
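
// Minimal app-side sketch of keeping the dynamic counts consistent
// (hypothetical handles; assumes VK_DYNAMIC_STATE_VIEWPORT and
// VK_DYNAMIC_STATE_SCISSOR were listed in pDynamicState and the PSO was built
// with viewportCount == scissorCount == 1):
//
//     VkViewport viewport = {0.0f, 0.0f, 800.0f, 600.0f, 0.0f, 1.0f};
//     VkRect2D scissor = {{0, 0}, {800, 600}};
//     vkCmdSetViewport(cmd_buffer, 0, 1, &viewport);  // count must match PSO viewportCount
//     vkCmdSetScissor(cmd_buffer, 0, 1, &scissor);    // count must match PSO scissorCount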

// Verify that create state for a pipeline is valid
static VkBool32 verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_NODE *> pPipelines,
                                          int pipelineIndex) {
    VkBool32 skipCall = VK_FALSE;

    PIPELINE_NODE *pPipeline = pPipelines[pipelineIndex];

    // If create derivative bit is set, check that we've specified a base
    // pipeline correctly, and that the base pipeline was created to allow
    // derivatives.
    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
        PIPELINE_NODE *pBasePipeline = nullptr;
        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
                skipCall |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
            } else {
                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
            }
        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
            pBasePipeline = getPipeline(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
        }

        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
        }
    }
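
    // Illustrative create-info fragment for a valid derivative (hypothetical
    // values): the base pipeline, earlier in the same array, must set
    // VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT, and the derivative names it by
    // index *or* handle, never both:
    //
    //     create_infos[0].flags = VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT;
    //     create_infos[1].flags = VK_PIPELINE_CREATE_DERIVATIVE_BIT;
    //     create_infos[1].basePipelineIndex = 0;                // earlier in the array
    //     create_infos[1].basePipelineHandle = VK_NULL_HANDLE;  // unused when index is given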

    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
        if (!my_data->physDevProperties.features.independentBlend) {
            if (pPipeline->attachments.size() > 0) {
                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
                    if ((pAttachments[0].blendEnable != pAttachments[i].blendEnable) ||
                        (pAttachments[0].srcColorBlendFactor != pAttachments[i].srcColorBlendFactor) ||
                        (pAttachments[0].dstColorBlendFactor != pAttachments[i].dstColorBlendFactor) ||
                        (pAttachments[0].colorBlendOp != pAttachments[i].colorBlendOp) ||
                        (pAttachments[0].srcAlphaBlendFactor != pAttachments[i].srcAlphaBlendFactor) ||
                        (pAttachments[0].dstAlphaBlendFactor != pAttachments[i].dstAlphaBlendFactor) ||
                        (pAttachments[0].alphaBlendOp != pAttachments[i].alphaBlendOp) ||
                        (pAttachments[0].colorWriteMask != pAttachments[i].colorWriteMask)) {
                        skipCall |=
                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                    DRAWSTATE_INDEPENDENT_BLEND, "DS", "Invalid Pipeline CreateInfo: If independent blend feature not "
                                    "enabled, all elements of pAttachments must be identical");
                    }
                }
            }
        }
        if (!my_data->physDevProperties.features.logicOp &&
            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
            skipCall |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_DISABLED_LOGIC_OP, "DS",
                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE");
        }
        if ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable == VK_TRUE) &&
            ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOp < VK_LOGIC_OP_CLEAR) ||
             (pPipeline->graphicsPipelineCI.pColorBlendState->logicOp > VK_LOGIC_OP_SET))) {
            skipCall |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_LOGIC_OP, "DS",
                        "Invalid Pipeline CreateInfo: If logicOpEnable is VK_TRUE, logicOp must be a valid VkLogicOp value");
        }
    }

    // Ensure the subpass index is valid. If not, then validate_pipeline_shaders
    // produces nonsense errors that confuse users. Other layers should already
    // emit errors for renderpass being invalid.
    auto rp_data = my_data->renderPassMap.find(pPipeline->graphicsPipelineCI.renderPass);
    if (rp_data != my_data->renderPassMap.end() &&
        pPipeline->graphicsPipelineCI.subpass >= rp_data->second->pCreateInfo->subpassCount) {
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
                                                                           "is out of range for this renderpass (0..%u)",
                            pPipeline->graphicsPipelineCI.subpass, rp_data->second->pCreateInfo->subpassCount - 1);
    }

    if (!validate_pipeline_shaders(my_data, device, pPipeline)) {
        skipCall = VK_TRUE;
    }
    // VS is required
    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
        skipCall |=
            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vtx Shader required");
    }
    // Either both or neither TC/TE shaders should be defined
    if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) !=
        ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) {
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                            "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
    }
    // Compute shaders should be specified independent of Gfx shaders
    if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) &&
        (pPipeline->active_shaders &
         (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
          VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) {
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                            "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline");
    }
    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
        (pPipeline->iaStateCI.topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                           "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
                                                                           "topology for tessellation pipelines");
    }
    if (pPipeline->iaStateCI.topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                                                               "topology is only valid for tessellation pipelines");
        }
        if (!pPipeline->tessStateCI.patchControlPoints || (pPipeline->tessStateCI.patchControlPoints > 32)) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                                                               "topology used with patchControlPoints value %u."
                                                                               " patchControlPoints should be >0 and <=32.",
                                pPipeline->tessStateCI.patchControlPoints);
        }
    }
    // Viewport state must be included if rasterization is enabled.
    // If the viewport state is included, the viewport and scissor counts should always match.
    // NOTE : Even if these are flagged as dynamic, counts need to be set correctly for shader compiler
    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
        !pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
        if (!pPipeline->graphicsPipelineCI.pViewportState) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. Even if viewport "
                                                                           "and scissors are dynamic, the PSO must include "
                                                                           "viewportCount and scissorCount in pViewportState.");
        } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount !=
                   pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                "Gfx Pipeline viewport count (%u) must match scissor count (%u).",
                                pPipeline->vpStateCI.viewportCount, pPipeline->vpStateCI.scissorCount);
        } else {
            // If viewport or scissor are not dynamic, then verify that data is appropriate for count
            VkBool32 dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
            VkBool32 dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
            if (!dynViewport) {
                if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount &&
                    !pPipeline->graphicsPipelineCI.pViewportState->pViewports) {
                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                        "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you "
                                        "must either include pViewports data, or include viewport in pDynamicState and set it with "
                                        "vkCmdSetViewport().",
                                        pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
                }
            }
            if (!dynScissor) {
                if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount &&
                    !pPipeline->graphicsPipelineCI.pViewportState->pScissors) {
                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                        "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you "
                                        "must either include pScissors data, or include scissor in pDynamicState and set it with "
                                        "vkCmdSetScissor().",
                                        pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
                }
            }
        }
    }
    return skipCall;
}

// Init the pipeline mapping info based on the pipeline create info's linked tree of structs
// Threading note : Calls to this function should be wrapped in a mutex
// TODO : this should really just be in the constructor for PIPELINE_NODE
static PIPELINE_NODE *initGraphicsPipeline(layer_data *dev_data, const VkGraphicsPipelineCreateInfo *pCreateInfo) {
    PIPELINE_NODE *pPipeline = new PIPELINE_NODE;

    // First init create info
    memcpy(&pPipeline->graphicsPipelineCI, pCreateInfo, sizeof(VkGraphicsPipelineCreateInfo));

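    // Note on the copy strategy (editorial summary of the code below): the
    // memcpy above is a shallow copy, so every pointer inside
    // graphicsPipelineCI still aims at app-owned memory that may be freed as
    // soon as vkCreateGraphicsPipelines() returns. The blocks that follow
    // therefore deep-copy each embedded struct and array the layer needs, and
    // repoint the create info at the shadow copies.
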
    size_t bufferSize = 0;
    const VkPipelineVertexInputStateCreateInfo *pVICI = NULL;
    const VkPipelineColorBlendStateCreateInfo *pCBCI = NULL;

    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
        const VkPipelineShaderStageCreateInfo *pPSSCI = &pCreateInfo->pStages[i];

        switch (pPSSCI->stage) {
        case VK_SHADER_STAGE_VERTEX_BIT:
            memcpy(&pPipeline->vsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
            pPipeline->active_shaders |= VK_SHADER_STAGE_VERTEX_BIT;
            break;
        case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
            memcpy(&pPipeline->tcsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
            pPipeline->active_shaders |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
            break;
        case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
            memcpy(&pPipeline->tesCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
            pPipeline->active_shaders |= VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
            break;
        case VK_SHADER_STAGE_GEOMETRY_BIT:
            memcpy(&pPipeline->gsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
            pPipeline->active_shaders |= VK_SHADER_STAGE_GEOMETRY_BIT;
            break;
        case VK_SHADER_STAGE_FRAGMENT_BIT:
            memcpy(&pPipeline->fsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
            pPipeline->active_shaders |= VK_SHADER_STAGE_FRAGMENT_BIT;
            break;
        case VK_SHADER_STAGE_COMPUTE_BIT:
            // TODO : Flag error, CS is specified through VkComputePipelineCreateInfo
            pPipeline->active_shaders |= VK_SHADER_STAGE_COMPUTE_BIT;
            break;
        default:
            // TODO : Flag error
            break;
        }
    }
    // Copy over GraphicsPipelineCreateInfo structure embedded pointers
    if (pCreateInfo->stageCount != 0) {
        pPipeline->graphicsPipelineCI.pStages = new VkPipelineShaderStageCreateInfo[pCreateInfo->stageCount];
        bufferSize = pCreateInfo->stageCount * sizeof(VkPipelineShaderStageCreateInfo);
        memcpy((void *)pPipeline->graphicsPipelineCI.pStages, pCreateInfo->pStages, bufferSize);
    }
    if (pCreateInfo->pVertexInputState != NULL) {
        pPipeline->vertexInputCI = *pCreateInfo->pVertexInputState;
        // Copy embedded ptrs
        pVICI = pCreateInfo->pVertexInputState;
        if (pVICI->vertexBindingDescriptionCount) {
            pPipeline->vertexBindingDescriptions = std::vector<VkVertexInputBindingDescription>(
                pVICI->pVertexBindingDescriptions, pVICI->pVertexBindingDescriptions + pVICI->vertexBindingDescriptionCount);
        }
        if (pVICI->vertexAttributeDescriptionCount) {
            pPipeline->vertexAttributeDescriptions = std::vector<VkVertexInputAttributeDescription>(
                pVICI->pVertexAttributeDescriptions, pVICI->pVertexAttributeDescriptions + pVICI->vertexAttributeDescriptionCount);
        }
        pPipeline->graphicsPipelineCI.pVertexInputState = &pPipeline->vertexInputCI;
    }
    if (pCreateInfo->pInputAssemblyState != NULL) {
        pPipeline->iaStateCI = *pCreateInfo->pInputAssemblyState;
        pPipeline->graphicsPipelineCI.pInputAssemblyState = &pPipeline->iaStateCI;
    }
    if (pCreateInfo->pTessellationState != NULL) {
        pPipeline->tessStateCI = *pCreateInfo->pTessellationState;
        pPipeline->graphicsPipelineCI.pTessellationState = &pPipeline->tessStateCI;
    }
    if (pCreateInfo->pViewportState != NULL) {
        pPipeline->vpStateCI = *pCreateInfo->pViewportState;
        pPipeline->graphicsPipelineCI.pViewportState = &pPipeline->vpStateCI;
    }
    if (pCreateInfo->pRasterizationState != NULL) {
        pPipeline->rsStateCI = *pCreateInfo->pRasterizationState;
        pPipeline->graphicsPipelineCI.pRasterizationState = &pPipeline->rsStateCI;
    }
    if (pCreateInfo->pMultisampleState != NULL) {
        pPipeline->msStateCI = *pCreateInfo->pMultisampleState;
        pPipeline->graphicsPipelineCI.pMultisampleState = &pPipeline->msStateCI;
    }
    if (pCreateInfo->pDepthStencilState != NULL) {
        pPipeline->dsStateCI = *pCreateInfo->pDepthStencilState;
        pPipeline->graphicsPipelineCI.pDepthStencilState = &pPipeline->dsStateCI;
    }
    if (pCreateInfo->pColorBlendState != NULL) {
        pPipeline->cbStateCI = *pCreateInfo->pColorBlendState;
        // Copy embedded ptrs
        pCBCI = pCreateInfo->pColorBlendState;
        if (pCBCI->attachmentCount) {
            pPipeline->attachments = std::vector<VkPipelineColorBlendAttachmentState>(
                pCBCI->pAttachments, pCBCI->pAttachments + pCBCI->attachmentCount);
        }
        pPipeline->graphicsPipelineCI.pColorBlendState = &pPipeline->cbStateCI;
    }
    if (pCreateInfo->pDynamicState != NULL) {
        pPipeline->dynStateCI = *pCreateInfo->pDynamicState;
        if (pPipeline->dynStateCI.dynamicStateCount) {
            pPipeline->dynStateCI.pDynamicStates = new VkDynamicState[pPipeline->dynStateCI.dynamicStateCount];
            bufferSize = pPipeline->dynStateCI.dynamicStateCount * sizeof(VkDynamicState);
            memcpy((void *)pPipeline->dynStateCI.pDynamicStates, pCreateInfo->pDynamicState->pDynamicStates, bufferSize);
        }
        pPipeline->graphicsPipelineCI.pDynamicState = &pPipeline->dynStateCI;
    }
    return pPipeline;
}

// Free the Pipeline nodes
static void deletePipelines(layer_data *my_data) {
    if (my_data->pipelineMap.empty())
        return;
    for (auto ii = my_data->pipelineMap.begin(); ii != my_data->pipelineMap.end(); ++ii) {
        if ((*ii).second->graphicsPipelineCI.stageCount != 0) {
            delete[](*ii).second->graphicsPipelineCI.pStages;
        }
        if ((*ii).second->dynStateCI.dynamicStateCount != 0) {
            delete[](*ii).second->dynStateCI.pDynamicStates;
        }
        delete (*ii).second;
    }
    my_data->pipelineMap.clear();
}

// For given pipeline, return number of MSAA samples, or one if MSAA disabled
static VkSampleCountFlagBits getNumSamples(layer_data *my_data, const VkPipeline pipeline) {
    PIPELINE_NODE *pPipe = my_data->pipelineMap[pipeline];
    if (VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pPipe->msStateCI.sType) {
        return pPipe->msStateCI.rasterizationSamples;
    }
    return VK_SAMPLE_COUNT_1_BIT;
}

// Validate state related to the PSO
static VkBool32 validatePipelineState(layer_data *my_data, const GLOBAL_CB_NODE *pCB, const VkPipelineBindPoint pipelineBindPoint,
                                      const VkPipeline pipeline) {
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
        // Verify that any MSAA request in PSO matches sample# in bound FB
        // Skip the check if rasterization is disabled.
        PIPELINE_NODE *pPipeline = my_data->pipelineMap[pipeline];
        if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
            !pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
            VkSampleCountFlagBits psoNumSamples = getNumSamples(my_data, pipeline);
            if (pCB->activeRenderPass) {
                const VkRenderPassCreateInfo *pRPCI = my_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo;
                const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
                VkSampleCountFlagBits subpassNumSamples = (VkSampleCountFlagBits)0;
                uint32_t i;

                for (i = 0; i < pSD->colorAttachmentCount; i++) {
                    VkSampleCountFlagBits samples;

                    if (pSD->pColorAttachments[i].attachment == VK_ATTACHMENT_UNUSED)
                        continue;

                    samples = pRPCI->pAttachments[pSD->pColorAttachments[i].attachment].samples;
                    if (subpassNumSamples == (VkSampleCountFlagBits)0) {
                        subpassNumSamples = samples;
                    } else if (subpassNumSamples != samples) {
                        subpassNumSamples = (VkSampleCountFlagBits)-1;
                        break;
                    }
                }
                if (pSD->pDepthStencilAttachment && pSD->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                    const VkSampleCountFlagBits samples = pRPCI->pAttachments[pSD->pDepthStencilAttachment->attachment].samples;
                    if (subpassNumSamples == (VkSampleCountFlagBits)0)
                        subpassNumSamples = samples;
                    else if (subpassNumSamples != samples)
                        subpassNumSamples = (VkSampleCountFlagBits)-1;
                }

                if (psoNumSamples != subpassNumSamples) {
                    return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                   (uint64_t)pipeline, __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
                                   "Num samples mismatch! Binding PSO (%#" PRIxLEAST64
                                   ") with %u samples while current RenderPass (%#" PRIxLEAST64 ") uses %u samples!",
                                   (uint64_t)pipeline, psoNumSamples, (uint64_t)pCB->activeRenderPass, subpassNumSamples);
                }
            } else {
                // TODO : I believe it's an error if we reach this point and don't have an activeRenderPass
                // Verify and flag error as appropriate
            }
        }
        // TODO : Add more checks here
    } else {
        // TODO : Validate non-gfx pipeline updates
    }
    return VK_FALSE;
}

// Block of code at start here specifically for managing/tracking DSs

// Return Pool node ptr for specified pool or else NULL
static DESCRIPTOR_POOL_NODE *getPoolNode(layer_data *my_data, const VkDescriptorPool pool) {
    if (my_data->descriptorPoolMap.find(pool) == my_data->descriptorPoolMap.end()) {
        return NULL;
    }
    return my_data->descriptorPoolMap[pool];
}

static LAYOUT_NODE *getLayoutNode(layer_data *my_data, const VkDescriptorSetLayout layout) {
    if (my_data->descriptorSetLayoutMap.find(layout) == my_data->descriptorSetLayoutMap.end()) {
        return NULL;
    }
    return my_data->descriptorSetLayoutMap[layout];
}

// Return VK_FALSE if update struct is of valid type, otherwise flag error and return code from callback
static VkBool32 validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        return VK_FALSE;
    default:
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
    }
}

// Return the descriptor count for the given update struct, or 0 for an unrecognized struct type
static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        // TODO : Need to understand this case better and make sure code is correct
        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
    default:
        return 0;
    }
}

// For given Layout Node and binding, return index where that binding begins
static uint32_t getBindingStartIndex(const LAYOUT_NODE *pLayout, const uint32_t binding) {
    uint32_t offsetIndex = 0;
    for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
        if (pLayout->createInfo.pBindings[i].binding == binding)
            break;
        offsetIndex += pLayout->createInfo.pBindings[i].descriptorCount;
    }
    return offsetIndex;
}

// For given layout node and binding, return last index that is updated
static uint32_t getBindingEndIndex(const LAYOUT_NODE *pLayout, const uint32_t binding) {
    uint32_t offsetIndex = 0;
    for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
        offsetIndex += pLayout->createInfo.pBindings[i].descriptorCount;
        if (pLayout->createInfo.pBindings[i].binding == binding)
            break;
    }
    return offsetIndex - 1;
}
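
// Worked example for the index helpers above and below (hypothetical layout):
// with bindings {binding 0: descriptorCount 2, binding 1: descriptorCount 3,
// binding 2: descriptorCount 1}, the flattened layout indices run 0..5.
// getBindingStartIndex(layout, 1) == 2 and getBindingEndIndex(layout, 1) == 4.
// An update of count 2 starting at arrayIndex 1 of binding 1 then spans
// overall indices 3..4, which is what getUpdateStartIndex/getUpdateEndIndex
// compute.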

// For given layout and update, return the first overall index of the layout that is updated
static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout, const uint32_t binding,
                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
    return getBindingStartIndex(pLayout, binding) + arrayIndex;
}

// For given layout and update, return the last overall index of the layout that is updated
static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout, const uint32_t binding,
                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
    return getBindingStartIndex(pLayout, binding) + arrayIndex + count - 1;
}

// Verify that the descriptor type in the update struct matches what's expected by the layout
static VkBool32 validateUpdateConsistency(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout,
                                          const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
    // First get actual type of update
    VkBool32 skipCall = VK_FALSE;
    VkDescriptorType actualType;
    uint32_t i = 0;
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
        break;
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        /* no need to validate */
        return VK_FALSE;
    default:
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
                            "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
                            string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
    }
    if (VK_FALSE == skipCall) {
        // Set first stageFlags as reference and verify that all other updates match it
        VkShaderStageFlags refStageFlags = pLayout->stageFlags[startIndex];
        for (i = startIndex; i <= endIndex; i++) {
            if (pLayout->descriptorTypes[i] != actualType) {
                skipCall |= log_msg(
                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
                    "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
                    string_VkDescriptorType(actualType), string_VkDescriptorType(pLayout->descriptorTypes[i]));
            }
            if (pLayout->stageFlags[i] != refStageFlags) {
                skipCall |= log_msg(
                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_DESCRIPTOR_STAGEFLAGS_MISMATCH, "DS",
                    "Write descriptor update has stageFlags %x that do not match overlapping binding descriptor stageFlags of %x!",
                    refStageFlags, pLayout->stageFlags[i]);
            }
        }
    }
    return skipCall;
}

// Determine the update type, allocate a new struct of that type, shadow the given pUpdate
// struct into the pNewNode param. Return VK_TRUE if error condition encountered and callback signals early exit.
// NOTE : Calls to this function should be wrapped in mutex
static VkBool32 shadowUpdateNode(layer_data *my_data, const VkDevice device, GENERIC_HEADER *pUpdate, GENERIC_HEADER **pNewNode) {
    VkBool32 skipCall = VK_FALSE;
    VkWriteDescriptorSet *pWDS = NULL;
    VkCopyDescriptorSet *pCDS = NULL;
    switch (pUpdate->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
        pWDS = new VkWriteDescriptorSet;
        *pNewNode = (GENERIC_HEADER *)pWDS;
        memcpy(pWDS, pUpdate, sizeof(VkWriteDescriptorSet));

        switch (pWDS->descriptorType) {
        case VK_DESCRIPTOR_TYPE_SAMPLER:
        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
        case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
        case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
            VkDescriptorImageInfo *info = new VkDescriptorImageInfo[pWDS->descriptorCount];
            memcpy(info, pWDS->pImageInfo, pWDS->descriptorCount * sizeof(VkDescriptorImageInfo));
            pWDS->pImageInfo = info;
        } break;
        case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
        case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
            VkBufferView *info = new VkBufferView[pWDS->descriptorCount];
            memcpy(info, pWDS->pTexelBufferView, pWDS->descriptorCount * sizeof(VkBufferView));
            pWDS->pTexelBufferView = info;
        } break;
        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
            VkDescriptorBufferInfo *info = new VkDescriptorBufferInfo[pWDS->descriptorCount];
            memcpy(info, pWDS->pBufferInfo, pWDS->descriptorCount * sizeof(VkDescriptorBufferInfo));
            pWDS->pBufferInfo = info;
        } break;
        default:
            // Unexpected descriptor type; flag as an error condition and exit early
            return VK_TRUE;
        }
        break;
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        pCDS = new VkCopyDescriptorSet;
        *pNewNode = (GENERIC_HEADER *)pCDS;
        memcpy(pCDS, pUpdate, sizeof(VkCopyDescriptorSet));
        break;
    default:
        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
                    "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
                    string_VkStructureType(pUpdate->sType), pUpdate->sType))
            return VK_TRUE;
    }
    // Make sure that pNext for the end of shadow copy is NULL
    (*pNewNode)->pNext = NULL;
    return skipCall;
}
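
// Why the deep copies above matter (illustrative sketch; hypothetical app
// code): after something like
//
//     VkDescriptorBufferInfo info = {buffer, 0, VK_WHOLE_SIZE};  // stack-allocated by the app
//     VkWriteDescriptorSet write = {};
//     write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
//     write.dstSet = set;
//     write.descriptorCount = 1;
//     write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
//     write.pBufferInfo = &info;
//     vkUpdateDescriptorSets(device, 1, &write, 0, NULL);
//
// the app's `info` may go out of scope immediately, so the layer must own its
// own copies of the pImageInfo / pTexelBufferView / pBufferInfo arrays in
// order to validate draws that happen later.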

// Verify that given sampler is valid
static VkBool32 validateSampler(const layer_data *my_data, const VkSampler *pSampler, const VkBool32 immutable) {
    VkBool32 skipCall = VK_FALSE;
    auto sampIt = my_data->sampleMap.find(*pSampler);
    if (sampIt == my_data->sampleMap.end()) {
        if (!immutable) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
                                (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS",
                                "vkUpdateDescriptorSets: Attempt to update descriptor with invalid sampler %#" PRIxLEAST64,
                                (uint64_t)*pSampler);
        } else { // immutable
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
                                (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS",
                                "vkUpdateDescriptorSets: Attempt to update descriptor whose binding has an invalid immutable "
                                "sampler %#" PRIxLEAST64,
                                (uint64_t)*pSampler);
        }
    } else {
        // TODO : Any further checks we want to do on the sampler?
    }
    return skipCall;
}

// find layout(s) on the cmd buf level
bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
    ImageSubresourcePair imgpair = {image, true, range};
    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
    if (imgsubIt == pCB->imageLayoutMap.end()) {
        imgpair = {image, false, VkImageSubresource()};
        imgsubIt = pCB->imageLayoutMap.find(imgpair);
        if (imgsubIt == pCB->imageLayoutMap.end())
            return false;
    }
    node = imgsubIt->second;
    return true;
}

// find layout(s) on the global level
bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
    if (imgsubIt == my_data->imageLayoutMap.end()) {
        imgpair = {imgpair.image, false, VkImageSubresource()};
        imgsubIt = my_data->imageLayoutMap.find(imgpair);
        if (imgsubIt == my_data->imageLayoutMap.end())
            return false;
    }
    layout = imgsubIt->second.layout;
    return true;
}

bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
    ImageSubresourcePair imgpair = {image, true, range};
    return FindLayout(my_data, imgpair, layout);
}
3611
FindLayouts(const layer_data * my_data,VkImage image,std::vector<VkImageLayout> & layouts)3612 bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
3613 auto sub_data = my_data->imageSubresourceMap.find(image);
3614 if (sub_data == my_data->imageSubresourceMap.end())
3615 return false;
3616 auto imgIt = my_data->imageMap.find(image);
3617 if (imgIt == my_data->imageMap.end())
3618 return false;
3619 bool ignoreGlobal = false;
3620 // TODO: Make this robust for >1 aspect mask. For now it simply ignores
3621 // potential errors in that case.
3622 if (sub_data->second.size() >= (imgIt->second.createInfo.arrayLayers * imgIt->second.createInfo.mipLevels + 1)) {
3623 ignoreGlobal = true;
3624 }
3625 for (auto imgsubpair : sub_data->second) {
3626 if (ignoreGlobal && !imgsubpair.hasSubresource)
3627 continue;
3628 auto img_data = my_data->imageLayoutMap.find(imgsubpair);
3629 if (img_data != my_data->imageLayoutMap.end()) {
3630 layouts.push_back(img_data->second.layout);
3631 }
3632 }
3633 return true;
3634 }
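// Worked example of the ignoreGlobal heuristic above (single-aspect case): an image with
// mipLevels = 3 and arrayLayers = 2 has 6 subresources, so once imageSubresourceMap holds
// 7 entries (6 per-subresource plus 1 whole-image entry) every subresource carries its own
// layout and the now-stale whole-image entry is skipped.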
3635
3636 // Set the layout on the global level
3637 void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3638 VkImage &image = imgpair.image;
3639 // TODO (mlentine): Maybe set format if new? Not used atm.
3640 my_data->imageLayoutMap[imgpair].layout = layout;
3641 // TODO (mlentine): Maybe make vector a set?
3642 auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
3643 if (subresource == my_data->imageSubresourceMap[image].end()) {
3644 my_data->imageSubresourceMap[image].push_back(imgpair);
3645 }
3646 }
3647
3648 void SetLayout(layer_data *my_data, VkImage image, const VkImageLayout &layout) {
3649 ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
3650 SetLayout(my_data, imgpair, layout);
3651 }
3652
3653 void SetLayout(layer_data *my_data, VkImage image, VkImageSubresource range, const VkImageLayout &layout) {
3654 ImageSubresourcePair imgpair = {image, true, range};
3655 SetLayout(my_data, imgpair, layout);
3656 }
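// A minimal sketch (assuming an already-validated VkImageMemoryBarrier named barrier) of
// recording a transition for the first subresource of the barrier's range:
//
//     VkImageSubresource sub = {barrier.subresourceRange.aspectMask,
//                               barrier.subresourceRange.baseMipLevel,
//                               barrier.subresourceRange.baseArrayLayer};
//     SetLayout(my_data, barrier.image, sub, barrier.newLayout);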
3657
3658 // Set the layout on the cmdbuf level
3659 void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3660 pCB->imageLayoutMap[imgpair] = node;
3661 // TODO (mlentine): Maybe make vector a set?
3662 auto subresource = std::find(pCB->imageSubresourceMap[image].begin(), pCB->imageSubresourceMap[image].end(), imgpair);
3663 if (subresource == pCB->imageSubresourceMap[image].end()) {
3664 pCB->imageSubresourceMap[image].push_back(imgpair);
3665 }
3666 }
3667
3668 void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3669 // TODO (mlentine): Maybe make vector a set?
3670 if (std::find(pCB->imageSubresourceMap[image].begin(), pCB->imageSubresourceMap[image].end(), imgpair) !=
3671 pCB->imageSubresourceMap[image].end()) {
3672 pCB->imageLayoutMap[imgpair].layout = layout;
3673 } else {
3674 // TODO (mlentine): Could be expensive and might need to be removed.
3675 assert(imgpair.hasSubresource);
3676 IMAGE_CMD_BUF_LAYOUT_NODE node;
3677 FindLayout(pCB, image, imgpair.subresource, node);
3678 SetLayout(pCB, image, imgpair, {node.initialLayout, layout});
3679 }
3680 }
3681
3682 void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3683 ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
3684 SetLayout(pCB, image, imgpair, node);
3685 }
3686
3687 void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3688 ImageSubresourcePair imgpair = {image, true, range};
3689 SetLayout(pCB, image, imgpair, node);
3690 }
3691
3692 void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image, const VkImageLayout &layout) {
3693 ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
3694 SetLayout(pCB, image, imgpair, layout);
3695 }
3696
3697 void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, const VkImageLayout &layout) {
3698 ImageSubresourcePair imgpair = {image, true, range};
3699 SetLayout(pCB, image, imgpair, layout);
3700 }
3701
3702 void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
3703 auto image_view_data = dev_data->imageViewMap.find(imageView);
3704 assert(image_view_data != dev_data->imageViewMap.end());
3705 const VkImage &image = image_view_data->second.image;
3706 const VkImageSubresourceRange &subRange = image_view_data->second.subresourceRange;
3707 // TODO: Do not iterate over every possibility - consolidate where possible
3708 for (uint32_t j = 0; j < subRange.levelCount; j++) {
3709 uint32_t level = subRange.baseMipLevel + j;
3710 for (uint32_t k = 0; k < subRange.layerCount; k++) {
3711 uint32_t layer = subRange.baseArrayLayer + k;
3712 VkImageSubresource sub = {subRange.aspectMask, level, layer};
3713 SetLayout(pCB, image, sub, layout);
3714 }
3715 }
3716 }
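// For example, a view created with baseMipLevel = 1, levelCount = 2, baseArrayLayer = 0,
// layerCount = 1 records the layout for subresources (mip 1, layer 0) and (mip 2, layer 0).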
3717
3718 // Verify that given imageView is valid
3719 static VkBool32 validateImageView(const layer_data *my_data, const VkImageView *pImageView, const VkImageLayout imageLayout) {
3720 VkBool32 skipCall = VK_FALSE;
3721 auto ivIt = my_data->imageViewMap.find(*pImageView);
3722 if (ivIt == my_data->imageViewMap.end()) {
3723 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3724 (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3725 "vkUpdateDescriptorSets: Attempt to update descriptor with invalid imageView %#" PRIxLEAST64,
3726 (uint64_t)*pImageView);
3727 } else {
3728 // Validate that imageLayout is compatible with aspectMask and image format
3729 VkImageAspectFlags aspectMask = ivIt->second.subresourceRange.aspectMask;
3730 VkImage image = ivIt->second.image;
3731 // TODO : Check here in case we have a bad image
3732 VkFormat format = VK_FORMAT_MAX_ENUM;
3733 auto imgIt = my_data->imageMap.find(image);
3734 if (imgIt != my_data->imageMap.end()) {
3735 format = (*imgIt).second.createInfo.format;
3736 } else {
3737 // Also need to check the swapchains.
3738 auto swapchainIt = my_data->device_extensions.imageToSwapchainMap.find(image);
3739 if (swapchainIt != my_data->device_extensions.imageToSwapchainMap.end()) {
3740 VkSwapchainKHR swapchain = swapchainIt->second;
3741 auto swapchain_nodeIt = my_data->device_extensions.swapchainMap.find(swapchain);
3742 if (swapchain_nodeIt != my_data->device_extensions.swapchainMap.end()) {
3743 SWAPCHAIN_NODE *pswapchain_node = swapchain_nodeIt->second;
3744 format = pswapchain_node->createInfo.imageFormat;
3745 }
3746 }
3747 }
3748 if (format == VK_FORMAT_MAX_ENUM) {
3749 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3750 (uint64_t)image, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3751 "vkUpdateDescriptorSets: Attempt to update descriptor with invalid image %#" PRIxLEAST64
3752 " in imageView %#" PRIxLEAST64,
3753 (uint64_t)image, (uint64_t)*pImageView);
3754 } else {
3755 VkBool32 ds = vk_format_is_depth_or_stencil(format);
3756 switch (imageLayout) {
3757 case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
3758 // Only Color bit must be set
3759 if ((aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
3760 skipCall |=
3761 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3762 (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
3763 "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL "
3764 "and imageView %#" PRIxLEAST64 ""
3765 " that does not have VK_IMAGE_ASPECT_COLOR_BIT set.",
3766 (uint64_t)*pImageView);
3767 }
3768 // format must NOT be DS
3769 if (ds) {
3770 skipCall |=
3771 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3772 (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3773 "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL "
3774 "and imageView %#" PRIxLEAST64 ""
3775 " but the image format is %s which is not a color format.",
3776 (uint64_t)*pImageView, string_VkFormat(format));
3777 }
3778 break;
3779 case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
3780 case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
3781 // Depth or stencil bit must be set, but both must NOT be set
3782 if (aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
3783 if (aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) {
3784 // both must NOT be set
3785 skipCall |=
3786 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3787 (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
3788 "vkUpdateDescriptorSets: Updating descriptor with imageView %#" PRIxLEAST64 ""
3789 " that has both STENCIL and DEPTH aspects set",
3790 (uint64_t)*pImageView);
3791 }
3792 } else if (!(aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
3793 // Neither were set
3794 skipCall |=
3795 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3796 (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
3797 "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 ""
3798 " that does not have STENCIL or DEPTH aspect set.",
3799 string_VkImageLayout(imageLayout), (uint64_t)*pImageView);
3800 }
3801 // format must be DS
3802 if (!ds) {
3803 skipCall |=
3804 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3805 (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3806 "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 ""
3807 " but the image format is %s which is not a depth/stencil format.",
3808 string_VkImageLayout(imageLayout), (uint64_t)*pImageView, string_VkFormat(format));
3809 }
3810 break;
3811 default:
3812 // anything to check for other layouts?
3813 break;
3814 }
3815 }
3816 }
3817 return skipCall;
3818 }
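// Summary of the layout/aspect/format rules enforced above:
//   VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL -> aspectMask must include the COLOR bit and
//       the image format must not be depth/stencil
//   VK_IMAGE_LAYOUT_DEPTH_STENCIL_{ATTACHMENT,READ_ONLY}_OPTIMAL -> aspectMask must have
//       exactly one of the DEPTH or STENCIL bits and the image format must be depth/stencil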
3819
3820 // Verify that given bufferView is valid
3821 static VkBool32 validateBufferView(const layer_data *my_data, const VkBufferView *pBufferView) {
3822 VkBool32 skipCall = VK_FALSE;
3823 auto bvIt = my_data->bufferViewMap.find(*pBufferView);
3824 if (bvIt == my_data->bufferViewMap.end()) {
3825 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT,
3826 (uint64_t)*pBufferView, __LINE__, DRAWSTATE_BUFFERVIEW_DESCRIPTOR_ERROR, "DS",
3827 "vkUpdateDescriptorSets: Attempt to update descriptor with invalid bufferView %#" PRIxLEAST64,
3828 (uint64_t)*pBufferView);
3829 } else {
3830 // TODO : Any further checks we want to do on the bufferView?
3831 }
3832 return skipCall;
3833 }
3834
3835 // Verify that given bufferInfo is valid
3836 static VkBool32 validateBufferInfo(const layer_data *my_data, const VkDescriptorBufferInfo *pBufferInfo) {
3837 VkBool32 skipCall = VK_FALSE;
3838 auto bufferIt = my_data->bufferMap.find(pBufferInfo->buffer);
3839 if (bufferIt == my_data->bufferMap.end()) {
3840 skipCall |=
3841 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3842 (uint64_t)pBufferInfo->buffer, __LINE__, DRAWSTATE_BUFFERINFO_DESCRIPTOR_ERROR, "DS",
3843 "vkUpdateDescriptorSets: Attempt to update descriptor where bufferInfo has invalid buffer %#" PRIxLEAST64,
3844 (uint64_t)pBufferInfo->buffer);
3845 } else {
3846 // TODO : Any further checks we want to do on the buffer?
3847 }
3848 return skipCall;
3849 }
3850
3851 static VkBool32 validateUpdateContents(const layer_data *my_data, const VkWriteDescriptorSet *pWDS,
3852 const VkDescriptorSetLayoutBinding *pLayoutBinding) {
3853 VkBool32 skipCall = VK_FALSE;
3854 // First verify that for the given Descriptor type, the correct DescriptorInfo data is supplied
3855 const VkSampler *pSampler = NULL;
3856 VkBool32 immutable = VK_FALSE;
3857 uint32_t i = 0;
3858 // For given update type, verify that update contents are correct
3859 switch (pWDS->descriptorType) {
3860 case VK_DESCRIPTOR_TYPE_SAMPLER:
3861 for (i = 0; i < pWDS->descriptorCount; ++i) {
3862 skipCall |= validateSampler(my_data, &(pWDS->pImageInfo[i].sampler), immutable);
3863 }
3864 break;
3865 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
3866 for (i = 0; i < pWDS->descriptorCount; ++i) {
3867 if (NULL == pLayoutBinding->pImmutableSamplers) {
3868 pSampler = &(pWDS->pImageInfo[i].sampler);
3869 if (immutable) {
3870 skipCall |= log_msg(
3871 my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3872 (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS",
3873 "vkUpdateDescriptorSets: Update #%u is not an immutable sampler %#" PRIxLEAST64
3874 ", but previous update(s) from this "
3875 "VkWriteDescriptorSet struct used an immutable sampler. All updates from a single struct must either "
3876 "use immutable or non-immutable samplers.",
3877 i, (uint64_t)*pSampler);
3878 }
3879 } else {
3880 if (i > 0 && !immutable) {
3881 skipCall |= log_msg(
3882 my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3883 (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS",
3884 "vkUpdateDescriptorSets: Update #%u is an immutable sampler, but previous update(s) from this "
3885 "VkWriteDescriptorSet struct used a non-immutable sampler. All updates from a single struct must either "
3886 "use immutable or non-immutable samplers.",
3887 i);
3888 }
3889 immutable = VK_TRUE;
3890 pSampler = &(pLayoutBinding->pImmutableSamplers[i]);
3891 }
3892 skipCall |= validateSampler(my_data, pSampler, immutable);
3893 }
3894 // Intentionally fall through here to also validate the imageView and imageLayout
3895 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
3896 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
3897 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
3898 for (i = 0; i < pWDS->descriptorCount; ++i) {
3899 skipCall |= validateImageView(my_data, &(pWDS->pImageInfo[i].imageView), pWDS->pImageInfo[i].imageLayout);
3900 }
3901 break;
3902 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
3903 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
3904 for (i = 0; i < pWDS->descriptorCount; ++i) {
3905 skipCall |= validateBufferView(my_data, &(pWDS->pTexelBufferView[i]));
3906 }
3907 break;
3908 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
3909 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
3910 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
3911 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
3912 for (i = 0; i < pWDS->descriptorCount; ++i) {
3913 skipCall |= validateBufferInfo(my_data, &(pWDS->pBufferInfo[i]));
3914 }
3915 break;
3916 default:
3917 break;
3918 }
3919 return skipCall;
3920 }
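// Mapping of descriptor type to the VkWriteDescriptorSet array consulted above:
//   SAMPLER, COMBINED_IMAGE_SAMPLER, SAMPLED_IMAGE, STORAGE_IMAGE, INPUT_ATTACHMENT -> pImageInfo
//   UNIFORM_TEXEL_BUFFER, STORAGE_TEXEL_BUFFER                                      -> pTexelBufferView
//   UNIFORM_BUFFER, STORAGE_BUFFER and their _DYNAMIC variants                      -> pBufferInfo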
3921 // Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
3922 // func_str is the name of the calling function
3923 // Return VK_FALSE if no errors occur
3924 // Return VK_TRUE if validation error occurs and callback returns VK_TRUE (to skip upcoming API call down the chain)
3925 VkBool32 validateIdleDescriptorSet(const layer_data *my_data, VkDescriptorSet set, std::string func_str) {
3926 VkBool32 skip_call = VK_FALSE;
3927 auto set_node = my_data->setMap.find(set);
3928 if (set_node == my_data->setMap.end()) {
3929 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3930 (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
3931 "Cannot call %s() on descriptor set %" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
3932 (uint64_t)(set));
3933 } else {
3934 if (set_node->second->in_use.load()) {
3935 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3936 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_OBJECT_INUSE,
3937 "DS", "Cannot call %s() on descriptor set %" PRIxLEAST64 " that is in use by a command buffer.",
3938 func_str.c_str(), (uint64_t)(set));
3939 }
3940 }
3941 return skip_call;
3942 }
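// A minimal caller sketch (entry-point name shown purely for illustration):
//
//     if (validateIdleDescriptorSet(dev_data, set, "vkFreeDescriptorSets") == VK_TRUE)
//         return; // skip the downstream call; the set is unknown or still in flight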
3943 static void invalidateBoundCmdBuffers(layer_data *dev_data, const SET_NODE *pSet) {
3944 // Flag any CBs this set is bound to as INVALID
3945 for (auto cb : pSet->boundCmdBuffers) {
3946 auto cb_node = dev_data->commandBufferMap.find(cb);
3947 if (cb_node != dev_data->commandBufferMap.end()) {
3948 cb_node->second->state = CB_INVALID;
3949 }
3950 }
3951 }
3952 // update DS mappings based on write and copy update arrays
3953 static VkBool32 dsUpdate(layer_data *my_data, VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pWDS,
3954 uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pCDS) {
3955 VkBool32 skipCall = VK_FALSE;
3956
3957 LAYOUT_NODE *pLayout = NULL;
3958 VkDescriptorSetLayoutCreateInfo *pLayoutCI = NULL;
3959 // Validate Write updates
3960 uint32_t i = 0;
3961 for (i = 0; i < descriptorWriteCount; i++) {
3962 VkDescriptorSet ds = pWDS[i].dstSet;
3963 SET_NODE *pSet = my_data->setMap[ds];
3964 // Set being updated cannot be in-flight
3965 if ((skipCall = validateIdleDescriptorSet(my_data, ds, "vkUpdateDescriptorSets")) == VK_TRUE)
3966 return skipCall;
3967 // If set is bound to any cmdBuffers, mark them invalid
3968 invalidateBoundCmdBuffers(my_data, pSet);
3969 GENERIC_HEADER *pUpdate = (GENERIC_HEADER *)&pWDS[i];
3970 pLayout = pSet->pLayout;
3971 // First verify valid update struct
3972 if ((skipCall = validUpdateStruct(my_data, device, pUpdate)) == VK_TRUE) {
3973 break;
3974 }
3975 uint32_t binding = 0, endIndex = 0;
3976 binding = pWDS[i].dstBinding;
3977 auto bindingToIndex = pLayout->bindingToIndexMap.find(binding);
3978 // Make sure that layout being updated has the binding being updated
3979 if (bindingToIndex == pLayout->bindingToIndexMap.end()) {
3980 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3981 (uint64_t)(ds), __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
3982 "Descriptor Set %" PRIu64 " does not have binding to match "
3983 "update binding %u for update type "
3984 "%s!",
3985 (uint64_t)(ds), binding, string_VkStructureType(pUpdate->sType));
3986 } else {
3987 // Next verify that update falls within size of given binding
3988 endIndex = getUpdateEndIndex(my_data, device, pLayout, binding, pWDS[i].dstArrayElement, pUpdate);
3989 if (getBindingEndIndex(pLayout, binding) < endIndex) {
3990 pLayoutCI = &pLayout->createInfo;
3991 string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS} ");
3992 skipCall |=
3993 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3994 (uint64_t)(ds), __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
3995 "Descriptor update type of %s is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
3996 string_VkStructureType(pUpdate->sType), binding, DSstr.c_str());
3997 } else { // TODO : should we skip update on a type mismatch or force it?
3998 uint32_t startIndex;
3999 startIndex = getUpdateStartIndex(my_data, device, pLayout, binding, pWDS[i].dstArrayElement, pUpdate);
4000 // Layout bindings match w/ update, now verify that update type
4001 // & stageFlags are the same for entire update
4002 if ((skipCall = validateUpdateConsistency(my_data, device, pLayout, pUpdate, startIndex, endIndex)) == VK_FALSE) {
4003 // The update is within bounds and consistent, but need to
4004 // make sure contents make sense as well
4005 if ((skipCall = validateUpdateContents(my_data, &pWDS[i],
4006 &pLayout->createInfo.pBindings[bindingToIndex->second])) == VK_FALSE) {
4007 // Update is good. Save the update info
4008 // Create new update struct for this set's shadow copy
4009 GENERIC_HEADER *pNewNode = NULL;
4010 skipCall |= shadowUpdateNode(my_data, device, pUpdate, &pNewNode);
4011 if (NULL == pNewNode) {
4012 skipCall |= log_msg(
4013 my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4014 (uint64_t)(ds), __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
4015 "Out of memory while attempting to allocate UPDATE struct in vkUpdateDescriptors()");
4016 } else {
4017 // Insert shadow node into LL of updates for this set
4018 pNewNode->pNext = pSet->pUpdateStructs;
4019 pSet->pUpdateStructs = pNewNode;
4020 // Now update appropriate descriptor(s) to point to new Update node
4021 for (uint32_t j = startIndex; j <= endIndex; j++) {
4022 assert(j < pSet->descriptorCount);
4023 pSet->ppDescriptors[j] = pNewNode;
4024 }
4025 }
4026 }
4027 }
4028 }
4029 }
4030 }
4031 // Now validate copy updates
4032 for (i = 0; i < descriptorCopyCount; ++i) {
4033 SET_NODE *pSrcSet = NULL, *pDstSet = NULL;
4034 LAYOUT_NODE *pSrcLayout = NULL, *pDstLayout = NULL;
4035 uint32_t srcStartIndex = 0, srcEndIndex = 0, dstStartIndex = 0, dstEndIndex = 0;
4036 // For each copy make sure that update falls within given layout and that types match
4037 pSrcSet = my_data->setMap[pCDS[i].srcSet];
4038 pDstSet = my_data->setMap[pCDS[i].dstSet];
4039 // Set being updated cannot be in-flight
4040 if ((skipCall = validateIdleDescriptorSet(my_data, pDstSet->set, "vkUpdateDescriptorSets")) == VK_TRUE)
4041 return skipCall;
4042 invalidateBoundCmdBuffers(my_data, pDstSet);
4043 pSrcLayout = pSrcSet->pLayout;
4044 pDstLayout = pDstSet->pLayout;
4045 // Validate that src binding is valid for src set layout
4046 if (pSrcLayout->bindingToIndexMap.find(pCDS[i].srcBinding) == pSrcLayout->bindingToIndexMap.end()) {
4047 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4048 (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
4049 "Copy descriptor update %u has srcBinding %u "
4050 "which is out of bounds for underlying SetLayout "
4051 "%#" PRIxLEAST64 " which only has bindings 0-%u.",
4052 i, pCDS[i].srcBinding, (uint64_t)pSrcLayout->layout, pSrcLayout->createInfo.bindingCount - 1);
4053 } else if (pDstLayout->bindingToIndexMap.find(pCDS[i].dstBinding) == pDstLayout->bindingToIndexMap.end()) {
4054 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4055 (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
4056 "Copy descriptor update %u has dstBinding %u "
4057 "which is out of bounds for underlying SetLayout "
4058 "%#" PRIxLEAST64 " which only has bindings 0-%u.",
4059 i, pCDS[i].dstBinding, (uint64_t)pDstLayout->layout, pDstLayout->createInfo.bindingCount - 1);
4060 } else {
4061 // Proceed with validation. Bindings are ok, but make sure update is within bounds of given layout
4062 srcEndIndex = getUpdateEndIndex(my_data, device, pSrcLayout, pCDS[i].srcBinding, pCDS[i].srcArrayElement,
4063 (const GENERIC_HEADER *)&(pCDS[i]));
4064 dstEndIndex = getUpdateEndIndex(my_data, device, pDstLayout, pCDS[i].dstBinding, pCDS[i].dstArrayElement,
4065 (const GENERIC_HEADER *)&(pCDS[i]));
4066 if (getBindingEndIndex(pSrcLayout, pCDS[i].srcBinding) < srcEndIndex) {
4067 pLayoutCI = &pSrcLayout->createInfo;
4068 string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS} ");
4069 skipCall |=
4070 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4071 (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
4072 "Copy descriptor src update is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
4073 pCDS[i].srcBinding, DSstr.c_str());
4074 } else if (getBindingEndIndex(pDstLayout, pCDS[i].dstBinding) < dstEndIndex) {
4075 pLayoutCI = &pDstLayout->createInfo;
4076 string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS} ");
4077 skipCall |=
4078 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4079 (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
4080 "Copy descriptor dest update is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
4081 pCDS[i].dstBinding, DSstr.c_str());
4082 } else {
4083 srcStartIndex = getUpdateStartIndex(my_data, device, pSrcLayout, pCDS[i].srcBinding, pCDS[i].srcArrayElement,
4084 (const GENERIC_HEADER *)&(pCDS[i]));
4085 dstStartIndex = getUpdateStartIndex(my_data, device, pDstLayout, pCDS[i].dstBinding, pCDS[i].dstArrayElement,
4086 (const GENERIC_HEADER *)&(pCDS[i]));
4087 for (uint32_t j = 0; j < pCDS[i].descriptorCount; ++j) {
4088 // For copy just make sure that the types match and then perform the update
4089 if (pSrcLayout->descriptorTypes[srcStartIndex + j] != pDstLayout->descriptorTypes[dstStartIndex + j]) {
4090 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
4091 __LINE__, DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
4092 "Copy descriptor update index %u, update count #%u, has src update descriptor type %s "
4093 "that does not match overlapping dest descriptor type of %s!",
4094 i, j + 1, string_VkDescriptorType(pSrcLayout->descriptorTypes[srcStartIndex + j]),
4095 string_VkDescriptorType(pDstLayout->descriptorTypes[dstStartIndex + j]));
4096 } else {
4097 // point dst descriptor at corresponding src descriptor
4098 // TODO : This may be a hole. I believe copy should be its own copy,
4099 // otherwise a subsequent write update to src will incorrectly affect the copy
4100 pDstSet->ppDescriptors[j + dstStartIndex] = pSrcSet->ppDescriptors[j + srcStartIndex];
4101 pDstSet->pUpdateStructs = pSrcSet->pUpdateStructs;
4102 }
4103 }
4104 }
4105 }
4106 }
4107 return skipCall;
4108 }
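// The write-update path above validates, in order: (1) the set is idle, (2) the update struct
// type is legal, (3) dstBinding exists in the set's layout, (4) the update stays within that
// binding's bounds, (5) type/stageFlags are consistent across the updated span, and (6) the
// update contents (samplers/views/buffers) are valid; only then is the update shadow-copied
// and each affected descriptor slot pointed at the new node.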
4109
4110 // Verify that the given pool has enough descriptors of each required type for the requested allocation.
4111 // NOTE : Calls to this function should be wrapped in mutex
4112 static VkBool32 validate_descriptor_availability_in_pool(layer_data *dev_data, DESCRIPTOR_POOL_NODE *pPoolNode, uint32_t count,
4113 const VkDescriptorSetLayout *pSetLayouts) {
4114 VkBool32 skipCall = VK_FALSE;
4115 uint32_t i = 0;
4116 uint32_t j = 0;
4117
4118 // Track number of descriptorSets allowable in this pool
4119 if (pPoolNode->availableSets < count) {
4120 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
4121 reinterpret_cast<uint64_t &>(pPoolNode->pool), __LINE__, DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS",
4122 "Unable to allocate %u descriptorSets from pool %#" PRIxLEAST64
4123 ". This pool only has %d descriptorSets remaining.",
4124 count, reinterpret_cast<uint64_t &>(pPoolNode->pool), pPoolNode->availableSets);
4125 } else {
4126 pPoolNode->availableSets -= count;
4127 }
4128
4129 for (i = 0; i < count; ++i) {
4130 LAYOUT_NODE *pLayout = getLayoutNode(dev_data, pSetLayouts[i]);
4131 if (NULL == pLayout) {
4132 skipCall |=
4133 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
4134 (uint64_t)pSetLayouts[i], __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
4135 "Unable to find set layout node for layout %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
4136 (uint64_t)pSetLayouts[i]);
4137 } else {
4138 uint32_t typeIndex = 0, poolSizeCount = 0;
4139 for (j = 0; j < pLayout->createInfo.bindingCount; ++j) {
4140 typeIndex = static_cast<uint32_t>(pLayout->createInfo.pBindings[j].descriptorType);
4141 poolSizeCount = pLayout->createInfo.pBindings[j].descriptorCount;
4142 if (poolSizeCount > pPoolNode->availableDescriptorTypeCount[typeIndex]) {
4143 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4144 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)pLayout->layout, __LINE__,
4145 DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS",
4146 "Unable to allocate %u descriptors of type %s from pool %#" PRIxLEAST64
4147 ". This pool only has %d descriptors of this type remaining.",
4148 poolSizeCount, string_VkDescriptorType(pLayout->createInfo.pBindings[j].descriptorType),
4149 (uint64_t)pPoolNode->pool, pPoolNode->availableDescriptorTypeCount[typeIndex]);
4150 } else { // Decrement available descriptors of this type
4151 pPoolNode->availableDescriptorTypeCount[typeIndex] -= poolSizeCount;
4152 }
4153 }
4154 }
4155 }
4156 return skipCall;
4157 }
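// Worked example of the accounting above (assumed numbers): a pool created with maxSets = 4
// and 8 UNIFORM_BUFFER descriptors can satisfy two allocations whose layouts each consume 3
// uniform-buffer descriptors (8 -> 5 -> 2 remaining); a third identical allocation then fails
// the per-type check and logs DRAWSTATE_DESCRIPTOR_POOL_EMPTY.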
4158
4159 // Free the shadowed update node for this Set
4160 // NOTE : Calls to this function should be wrapped in mutex
4161 static void freeShadowUpdateTree(SET_NODE *pSet) {
4162 GENERIC_HEADER *pShadowUpdate = pSet->pUpdateStructs;
4163 pSet->pUpdateStructs = NULL;
4164 GENERIC_HEADER *pFreeUpdate = pShadowUpdate;
4165 // Clear the descriptor mappings as they will now be invalid
4166 memset(pSet->ppDescriptors, 0, pSet->descriptorCount * sizeof(GENERIC_HEADER *));
4167 while (pShadowUpdate) {
4168 pFreeUpdate = pShadowUpdate;
4169 pShadowUpdate = (GENERIC_HEADER *)pShadowUpdate->pNext;
4170 VkWriteDescriptorSet *pWDS = NULL;
4171 switch (pFreeUpdate->sType) {
4172 case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
4173 pWDS = (VkWriteDescriptorSet *)pFreeUpdate;
4174 switch (pWDS->descriptorType) {
4175 case VK_DESCRIPTOR_TYPE_SAMPLER:
4176 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
4177 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
4178 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
4179 delete[] pWDS->pImageInfo;
4180 } break;
4181 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
4182 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
4183 delete[] pWDS->pTexelBufferView;
4184 } break;
4185 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
4186 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
4187 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
4188 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
4189 delete[] pWDS->pBufferInfo;
4190 } break;
4191 default:
4192 break;
4193 }
4194 break;
4195 case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
4196 break;
4197 default:
4198 assert(0);
4199 break;
4200 }
4201 delete pFreeUpdate;
4202 }
4203 }
4204
4205 // Free all DS Pools including their Sets & related sub-structs
4206 // NOTE : Calls to this function should be wrapped in mutex
4207 static void deletePools(layer_data *my_data) {
4208 if (my_data->descriptorPoolMap.size() <= 0)
4209 return;
4210 for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
4211 SET_NODE *pSet = (*ii).second->pSets;
4212 SET_NODE *pFreeSet = pSet;
4213 while (pSet) {
4214 pFreeSet = pSet;
4215 pSet = pSet->pNext;
4216 // Freeing layouts handled in deleteLayouts() function
4217 // Free Update shadow struct tree
4218 freeShadowUpdateTree(pFreeSet);
4219 delete[] pFreeSet->ppDescriptors;
4220 delete pFreeSet;
4221 }
4222 delete (*ii).second;
4223 }
4224 my_data->descriptorPoolMap.clear();
4225 }
4226
4227 // WARN : Once deleteLayouts() is called, any layout ptrs in the Pool/Set data structures will be invalid
4228 // NOTE : Calls to this function should be wrapped in mutex
4229 static void deleteLayouts(layer_data *my_data) {
4230 if (my_data->descriptorSetLayoutMap.size() <= 0)
4231 return;
4232 for (auto ii = my_data->descriptorSetLayoutMap.begin(); ii != my_data->descriptorSetLayoutMap.end(); ++ii) {
4233 LAYOUT_NODE *pLayout = (*ii).second;
4234 if (pLayout->createInfo.pBindings) {
4235 for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
4236 delete[] pLayout->createInfo.pBindings[i].pImmutableSamplers;
4237 }
4238 delete[] pLayout->createInfo.pBindings;
4239 }
4240 delete pLayout;
4241 }
4242 my_data->descriptorSetLayoutMap.clear();
4243 }
4244
4245 // Currently clearing a set is removing all previous updates to that set
4246 // TODO : Validate if this is correct clearing behavior
4247 static void clearDescriptorSet(layer_data *my_data, VkDescriptorSet set) {
4248 SET_NODE *pSet = getSetNode(my_data, set);
4249 if (!pSet) {
4250 // TODO : Return error
4251 } else {
4252 freeShadowUpdateTree(pSet);
4253 }
4254 }
4255
4256 static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
4257 VkDescriptorPoolResetFlags flags) {
4258 DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pool);
4259 if (!pPool) {
4260 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
4261 (uint64_t)pool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
4262 "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkResetDescriptorPool() call", (uint64_t)pool);
4263 } else {
4264 // TODO: validate flags
4265 // For every set off of this pool, clear it
4266 SET_NODE *pSet = pPool->pSets;
4267 while (pSet) {
4268 clearDescriptorSet(my_data, pSet->set);
4269 pSet = pSet->pNext;
4270 }
4271 // Reset available count to max count for this pool
4272 for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
4273 pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
4274 }
4275 }
4276 }
4277
4278 // For given CB object, fetch associated CB Node from map
4279 static GLOBAL_CB_NODE *getCBNode(layer_data *my_data, const VkCommandBuffer cb) {
4280 if (my_data->commandBufferMap.count(cb) == 0) {
4281 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4282 reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4283 "Attempt to use CommandBuffer %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb));
4284 return NULL;
4285 }
4286 return my_data->commandBufferMap[cb];
4287 }
4288
4289 // Free all CB Nodes
4290 // NOTE : Calls to this function should be wrapped in mutex
4291 static void deleteCommandBuffers(layer_data *my_data) {
4292 if (my_data->commandBufferMap.size() <= 0) {
4293 return;
4294 }
4295 for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
4296 delete (*ii).second;
4297 }
4298 my_data->commandBufferMap.clear();
4299 }
4300
4301 static VkBool32 report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
4302 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4303 (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
4304 "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
4305 }
4306
4307 VkBool32 validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
4308 if (!pCB->activeRenderPass)
4309 return VK_FALSE;
4310 VkBool32 skip_call = VK_FALSE;
4311 if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS && cmd_type != CMD_EXECUTECOMMANDS) {
4312 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4313 DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4314 "Commands cannot be called in a subpass using secondary command buffers.");
4315 } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
4316 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4317 DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4318 "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
4319 }
4320 return skip_call;
4321 }
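// In other words: within a subpass begun with VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS,
// only vkCmdExecuteCommands() is legal; within one begun with VK_SUBPASS_CONTENTS_INLINE,
// every command except vkCmdExecuteCommands() is legal.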
4322
4323 static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
4324 if (!(flags & VK_QUEUE_GRAPHICS_BIT))
4325 return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4326 DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4327 "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
4328 return false;
4329 }
4330
4331 static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
4332 if (!(flags & VK_QUEUE_COMPUTE_BIT))
4333 return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4334 DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4335 "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
4336 return false;
4337 }
4338
4339 static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
4340 if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
4341 return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4342 DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4343 "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
4344 return false;
4345 }
4346
4347 // Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not
4348 // in the recording state or if there's an issue with the Cmd ordering
4349 static VkBool32 addCmd(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
4350 VkBool32 skipCall = VK_FALSE;
4351 auto pool_data = my_data->commandPoolMap.find(pCB->createInfo.commandPool);
4352 if (pool_data != my_data->commandPoolMap.end()) {
4353 VkQueueFlags flags = my_data->physDevProperties.queue_family_properties[pool_data->second.queueFamilyIndex].queueFlags;
4354 switch (cmd) {
4355 case CMD_BINDPIPELINE:
4356 case CMD_BINDPIPELINEDELTA:
4357 case CMD_BINDDESCRIPTORSETS:
4358 case CMD_FILLBUFFER:
4359 case CMD_CLEARCOLORIMAGE:
4360 case CMD_SETEVENT:
4361 case CMD_RESETEVENT:
4362 case CMD_WAITEVENTS:
4363 case CMD_BEGINQUERY:
4364 case CMD_ENDQUERY:
4365 case CMD_RESETQUERYPOOL:
4366 case CMD_COPYQUERYPOOLRESULTS:
4367 case CMD_WRITETIMESTAMP:
4368 skipCall |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
4369 break;
4370 case CMD_SETVIEWPORTSTATE:
4371 case CMD_SETSCISSORSTATE:
4372 case CMD_SETLINEWIDTHSTATE:
4373 case CMD_SETDEPTHBIASSTATE:
4374 case CMD_SETBLENDSTATE:
4375 case CMD_SETDEPTHBOUNDSSTATE:
4376 case CMD_SETSTENCILREADMASKSTATE:
4377 case CMD_SETSTENCILWRITEMASKSTATE:
4378 case CMD_SETSTENCILREFERENCESTATE:
4379 case CMD_BINDINDEXBUFFER:
4380 case CMD_BINDVERTEXBUFFER:
4381 case CMD_DRAW:
4382 case CMD_DRAWINDEXED:
4383 case CMD_DRAWINDIRECT:
4384 case CMD_DRAWINDEXEDINDIRECT:
4385 case CMD_BLITIMAGE:
4386 case CMD_CLEARATTACHMENTS:
4387 case CMD_CLEARDEPTHSTENCILIMAGE:
4388 case CMD_RESOLVEIMAGE:
4389 case CMD_BEGINRENDERPASS:
4390 case CMD_NEXTSUBPASS:
4391 case CMD_ENDRENDERPASS:
4392 skipCall |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
4393 break;
4394 case CMD_DISPATCH:
4395 case CMD_DISPATCHINDIRECT:
4396 skipCall |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
4397 break;
4398 case CMD_COPYBUFFER:
4399 case CMD_COPYIMAGE:
4400 case CMD_COPYBUFFERTOIMAGE:
4401 case CMD_COPYIMAGETOBUFFER:
4402 case CMD_CLONEIMAGEDATA:
4403 case CMD_UPDATEBUFFER:
4404 case CMD_PIPELINEBARRIER:
4405 case CMD_EXECUTECOMMANDS:
4406 break;
4407 default:
4408 break;
4409 }
4410 }
4411 if (pCB->state != CB_RECORDING) {
4412 skipCall |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
4413 } else {
4414 skipCall |= validateCmdsInCmdBuffer(my_data, pCB, cmd);
4415 CMD_NODE cmdNode = {}; // init cmd node and append to end of cmd LL
4416 cmdNode.cmdNumber = ++pCB->numCmds;
4417 cmdNode.type = cmd;
4418 pCB->cmds.push_back(cmdNode);
4419 }
4420 return skipCall;
4421 }
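// For example (sketch): recording vkCmdDraw into a command buffer whose pool was created for a
// compute-only queue family fails the checkGraphicsBit() test above, and any command recorded
// before vkBeginCommandBuffer() is reported via report_error_no_cb_begin().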
4422 // Reset the command buffer state
4423 // Maintain the createInfo and set state to CB_NEW, but clear all other state
4424 static void resetCB(layer_data *my_data, const VkCommandBuffer cb) {
4425 GLOBAL_CB_NODE *pCB = my_data->commandBufferMap[cb];
4426 if (pCB) {
4427 pCB->cmds.clear();
4428 // Reset CB state (note that createInfo is not cleared)
4429 pCB->commandBuffer = cb;
4430 memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
4431 memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
4432 pCB->numCmds = 0;
4433 memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
4434 pCB->state = CB_NEW;
4435 pCB->submitCount = 0;
4436 pCB->status = 0;
4437 pCB->viewports.clear();
4438 pCB->scissors.clear();
4439 for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4440 // Before clearing lastBoundState, remove any CB bindings from all uniqueBoundSets
4441 for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4442 auto set_node = my_data->setMap.find(set);
4443 if (set_node != my_data->setMap.end()) {
4444 set_node->second->boundCmdBuffers.erase(pCB->commandBuffer);
4445 }
4446 }
4447 pCB->lastBound[i].reset();
4448 }
4449 memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
4450 pCB->activeRenderPass = 0;
4451 pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
4452 pCB->activeSubpass = 0;
4453 pCB->framebuffer = 0;
4454 pCB->fenceId = 0;
4455 pCB->lastSubmittedFence = VK_NULL_HANDLE;
4456 pCB->lastSubmittedQueue = VK_NULL_HANDLE;
4457 pCB->destroyedSets.clear();
4458 pCB->updatedSets.clear();
4459 pCB->destroyedFramebuffers.clear();
4460 pCB->waitedEvents.clear();
4461 pCB->semaphores.clear();
4462 pCB->events.clear();
4463 pCB->waitedEventsBeforeQueryReset.clear();
4464 pCB->queryToStateMap.clear();
4465 pCB->activeQueries.clear();
4466 pCB->startedQueries.clear();
4467 pCB->imageLayoutMap.clear();
4468 pCB->eventToStageMap.clear();
4469 pCB->drawData.clear();
4470 pCB->currentDrawData.buffers.clear();
4471 pCB->primaryCommandBuffer = VK_NULL_HANDLE;
4472 pCB->secondaryCommandBuffers.clear();
4473 pCB->activeDescriptorSets.clear();
4474 pCB->validate_functions.clear();
4475 pCB->pMemObjList.clear();
4476 pCB->eventUpdates.clear();
4477 }
4478 }
4479
4480 // Set PSO-related status bits for CB, including dynamic state set via PSO
4481 static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe) {
4482 for (auto const & att : pPipe->attachments) {
4483 if (0 != att.colorWriteMask) {
4484 pCB->status |= CBSTATUS_COLOR_BLEND_WRITE_ENABLE;
4485 }
4486 }
4487 if (pPipe->dsStateCI.depthWriteEnable) {
4488 pCB->status |= CBSTATUS_DEPTH_WRITE_ENABLE;
4489 }
4490 if (pPipe->dsStateCI.stencilTestEnable) {
4491 pCB->status |= CBSTATUS_STENCIL_TEST_ENABLE;
4492 }
4493 // Account for any dynamic state not set via this PSO
4494 if (!pPipe->dynStateCI.dynamicStateCount) { // All state is static
4495 pCB->status = CBSTATUS_ALL;
4496 } else {
4497 // First consider all state on
4498 // Then unset any state that's noted as dynamic in PSO
4499 // Finally OR that into CB statemask
4500 CBStatusFlags psoDynStateMask = CBSTATUS_ALL;
4501 for (uint32_t i = 0; i < pPipe->dynStateCI.dynamicStateCount; i++) {
4502 switch (pPipe->dynStateCI.pDynamicStates[i]) {
4503 case VK_DYNAMIC_STATE_VIEWPORT:
4504 psoDynStateMask &= ~CBSTATUS_VIEWPORT_SET;
4505 break;
4506 case VK_DYNAMIC_STATE_SCISSOR:
4507 psoDynStateMask &= ~CBSTATUS_SCISSOR_SET;
4508 break;
4509 case VK_DYNAMIC_STATE_LINE_WIDTH:
4510 psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
4511 break;
4512 case VK_DYNAMIC_STATE_DEPTH_BIAS:
4513 psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
4514 break;
4515 case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
4516 psoDynStateMask &= ~CBSTATUS_BLEND_SET;
4517 break;
4518 case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
4519 psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
4520 break;
4521 case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
4522 psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
4523 break;
4524 case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
4525 psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
4526 break;
4527 case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
4528 psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
4529 break;
4530 default:
4531 // TODO : Flag error here
4532 break;
4533 }
4534 }
4535 pCB->status |= psoDynStateMask;
4536 }
4537 }
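// Worked example: a pipeline whose only dynamic state is VK_DYNAMIC_STATE_VIEWPORT yields
// psoDynStateMask = CBSTATUS_ALL & ~CBSTATUS_VIEWPORT_SET, so binding it marks all state as
// set except the viewport, which must still be supplied by vkCmdSetViewport().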
4538
4539 // Print the last bound Gfx Pipeline
4540 static VkBool32 printPipeline(layer_data *my_data, const VkCommandBuffer cb) {
4541 VkBool32 skipCall = VK_FALSE;
4542 GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
4543 if (pCB) {
4544 PIPELINE_NODE *pPipeTrav = getPipeline(my_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
4545 if (!pPipeTrav) {
4546 // nothing to print
4547 } else {
4548 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
4549 __LINE__, DRAWSTATE_NONE, "DS", "%s",
4550 vk_print_vkgraphicspipelinecreateinfo(&pPipeTrav->graphicsPipelineCI, "{DS}").c_str());
4551 }
4552 }
4553 return skipCall;
4554 }
4555
4556 static void printCB(layer_data *my_data, const VkCommandBuffer cb) {
4557 GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
4558 if (pCB && pCB->cmds.size() > 0) {
4559 log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4560 DRAWSTATE_NONE, "DS", "Cmds in CB %p", (void *)cb);
4561 vector<CMD_NODE> cmds = pCB->cmds;
4562 for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) {
4563 // TODO : Need to pass cb as srcObj here
4564 log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4565 __LINE__, DRAWSTATE_NONE, "DS", " CMD#%" PRIu64 ": %s", (*ii).cmdNumber, cmdTypeToString((*ii).type).c_str());
4566 }
4567 } else {
4568 // Nothing to print
4569 }
4570 }
4571
4572 static VkBool32 synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
4573 VkBool32 skipCall = VK_FALSE;
4574 if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
4575 return skipCall;
4576 }
4577 skipCall |= printPipeline(my_data, cb);
4578 return skipCall;
4579 }
4580
4581 // Flags a validation error if the associated call is made inside a render pass. The routine named by
4582 // apiName should ONLY be called outside a render pass.
4583 static VkBool32 insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
4584 VkBool32 inside = VK_FALSE;
4585 if (pCB->activeRenderPass) {
4586 inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4587 (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
4588 "%s: It is invalid to issue this call inside an active render pass (%#" PRIxLEAST64 ")", apiName,
4589 (uint64_t)pCB->activeRenderPass);
4590 }
4591 return inside;
4592 }
4593
4594 // Flags a validation error if the associated call is made outside a render pass. The routine named by
4595 // apiName should ONLY be called inside a render pass.
4596 static VkBool32 outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
4597 VkBool32 outside = VK_FALSE;
4598 if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
4599 ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
4600 !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
4601 outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4602 (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS",
4603 "%s: This call must be issued inside an active render pass.", apiName);
4604 }
4605 return outside;
4606 }
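// Typical pairing (sketch): commands that must be recorded outside a render pass (e.g.
// vkCmdCopyBuffer) are checked with insideRenderPass(), while commands that must be recorded
// inside one (e.g. vkCmdDraw) are checked with outsideRenderPass().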
4607
4608 static void init_core_validation(layer_data *my_data, const VkAllocationCallbacks *pAllocator) {
4609
4610 layer_debug_actions(my_data->report_data, my_data->logging_callback, pAllocator, "lunarg_core_validation");
4611
4612 if (!globalLockInitialized) {
4613 loader_platform_thread_create_mutex(&globalLock);
4614 globalLockInitialized = 1;
4615 }
4616 #if MTMERGESOURCE
4617 // Zero out memory property data
4618 memset(&memProps, 0, sizeof(VkPhysicalDeviceMemoryProperties));
4619 #endif
4620 }
4621
4622 VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4623 vkCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
4624 VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4625
4626 assert(chain_info->u.pLayerInfo);
4627 PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4628 PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
4629 if (fpCreateInstance == NULL)
4630 return VK_ERROR_INITIALIZATION_FAILED;
4631
4632 // Advance the link info for the next element on the chain
4633 chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4634
4635 VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
4636 if (result != VK_SUCCESS)
4637 return result;
4638
4639 layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
4640 my_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
4641 layer_init_instance_dispatch_table(*pInstance, my_data->instance_dispatch_table, fpGetInstanceProcAddr);
4642
4643 my_data->report_data = debug_report_create_instance(my_data->instance_dispatch_table, *pInstance,
4644 pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
4645
4646 init_core_validation(my_data, pAllocator);
4647
4648 ValidateLayerOrdering(*pCreateInfo);
4649
4650 return result;
4651 }
4652
4653 /* hook DestroyInstance to remove tableInstanceMap entry */
4654 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
4655 // TODOSC : Shouldn't need any customization here
4656 dispatch_key key = get_dispatch_key(instance);
4657 // TBD: Need any locking this early, in case this function is called at the
4658 // same time by more than one thread?
4659 layer_data *my_data = get_my_data_ptr(key, layer_data_map);
4660 VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
4661 pTable->DestroyInstance(instance, pAllocator);
4662
4663 loader_platform_thread_lock_mutex(&globalLock);
4664 // Clean up logging callback, if any
4665 while (my_data->logging_callback.size() > 0) {
4666 VkDebugReportCallbackEXT callback = my_data->logging_callback.back();
4667 layer_destroy_msg_callback(my_data->report_data, callback, pAllocator);
4668 my_data->logging_callback.pop_back();
4669 }
4670
4671 layer_debug_report_destroy_instance(my_data->report_data);
4672 delete my_data->instance_dispatch_table;
4673 layer_data_map.erase(key);
4674 loader_platform_thread_unlock_mutex(&globalLock);
4675 if (layer_data_map.empty()) {
4676 // Release mutex when destroying last instance.
4677 loader_platform_thread_delete_mutex(&globalLock);
4678 globalLockInitialized = 0;
4679 }
4680 }
4681
4682 static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
4683 uint32_t i;
4684 // TBD: Need any locking, in case this function is called at the same time
4685 // by more than one thread?
4686 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4687 dev_data->device_extensions.wsi_enabled = false;
4688
4689 VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
4690 PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
4691 pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
4692 pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
4693 pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
4694 pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
4695 pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
4696
4697 for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
4698 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
4699 dev_data->device_extensions.wsi_enabled = true;
4700 }
4701 }
4702
4703 VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
4704 const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
4705 VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4706
4707 assert(chain_info->u.pLayerInfo);
4708 PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4709 PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
4710 PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
4711 if (fpCreateDevice == NULL) {
4712 return VK_ERROR_INITIALIZATION_FAILED;
4713 }
4714
4715 // Advance the link info for the next element on the chain
4716 chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4717
4718 VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
4719 if (result != VK_SUCCESS) {
4720 return result;
4721 }
4722
4723 loader_platform_thread_lock_mutex(&globalLock);
4724 layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
4725 layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
4726
4727 // Setup device dispatch table
4728 my_device_data->device_dispatch_table = new VkLayerDispatchTable;
4729 layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr);
4730
4731 my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
4732 createDeviceRegisterExtensions(pCreateInfo, *pDevice);
4733 // Get physical device limits for this device
4734 my_instance_data->instance_dispatch_table->GetPhysicalDeviceProperties(gpu, &(my_device_data->physDevProperties.properties));
4735 uint32_t count;
4736 my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
4737 my_device_data->physDevProperties.queue_family_properties.resize(count);
4738 my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(
4739 gpu, &count, &my_device_data->physDevProperties.queue_family_properties[0]);
4740 // TODO: device limits should make sure these are compatible
4741 if (pCreateInfo->pEnabledFeatures) {
4742 my_device_data->physDevProperties.features = *pCreateInfo->pEnabledFeatures;
4743 } else {
4744 memset(&my_device_data->physDevProperties.features, 0, sizeof(VkPhysicalDeviceFeatures));
4745 }
4746 loader_platform_thread_unlock_mutex(&globalLock);
4747
4748 ValidateLayerOrdering(*pCreateInfo);
4749
4750 return result;
4751 }

// prototype
static void deleteRenderPasses(layer_data *);
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
    // TODOSC : Shouldn't need any customization here
    dispatch_key key = get_dispatch_key(device);
    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
    // Free all the memory
    loader_platform_thread_lock_mutex(&globalLock);
    deletePipelines(dev_data);
    deleteRenderPasses(dev_data);
    deleteCommandBuffers(dev_data);
    deletePools(dev_data);
    deleteLayouts(dev_data);
    dev_data->imageViewMap.clear();
    dev_data->imageMap.clear();
    dev_data->imageSubresourceMap.clear();
    dev_data->imageLayoutMap.clear();
    dev_data->bufferViewMap.clear();
    dev_data->bufferMap.clear();
    loader_platform_thread_unlock_mutex(&globalLock);
#if MTMERGESOURCE
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&globalLock);
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()");
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================");
    print_mem_list(dev_data, device);
    printCBList(dev_data, device);
    delete_cmd_buf_info_list(dev_data);
    // Report any memory leaks
    DEVICE_MEM_INFO *pInfo = NULL;
    if (dev_data->memObjMap.size() > 0) {
        for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
            pInfo = &(*ii).second;
            if (pInfo->allocInfo.allocationSize != 0) {
                // Valid Usage: All child objects created on device must have been destroyed prior to destroying device
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK,
                            "MEM", "Mem Object %" PRIu64 " has not been freed. You should clean up this memory by calling "
                                   "vkFreeMemory(%" PRIu64 ") prior to vkDestroyDevice().",
                            (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
            }
        }
    }
    // Queues persist until device is destroyed
    delete_queue_info_list(dev_data);
    layer_debug_report_destroy_device(device);
    loader_platform_thread_unlock_mutex(&globalLock);

#if DISPATCH_MAP_DEBUG
    fprintf(stderr, "Device: %p, key: %p\n", device, key);
#endif
    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
    if (VK_FALSE == skipCall) {
        pDisp->DestroyDevice(device, pAllocator);
    }
#else
    dev_data->device_dispatch_table->DestroyDevice(device, pAllocator);
#endif
    delete dev_data->device_dispatch_table;
    layer_data_map.erase(key);
}

#if MTMERGESOURCE
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkGetPhysicalDeviceMemoryProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties *pMemoryProperties) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
    VkLayerInstanceDispatchTable *pInstanceTable = my_data->instance_dispatch_table;
    pInstanceTable->GetPhysicalDeviceMemoryProperties(physicalDevice, pMemoryProperties);
    memcpy(&memProps, pMemoryProperties, sizeof(VkPhysicalDeviceMemoryProperties));
}
#endif

static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
    return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(ARRAY_SIZE(cv_global_layers), cv_global_layers, pCount, pProperties);
}

// TODO: Why does this exist - can we just use global?
static const VkLayerProperties cv_device_layers[] = {{
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
}};

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    if (pLayerName == NULL) {
        dispatch_key key = get_dispatch_key(physicalDevice);
        layer_data *my_data = get_my_data_ptr(key, layer_data_map);
        return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
    } else {
        return util_GetExtensionProperties(0, NULL, pCount, pProperties);
    }
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
    /* draw_state physical device layers are the same as global */
    return util_GetLayerProperties(ARRAY_SIZE(cv_device_layers), cv_device_layers, pCount, pProperties);
}

// Validate that the initial layout recorded in the command buffer for the
// image matches the actual (global) layout of that image.
VkBool32 ValidateCmdBufImageLayouts(VkCommandBuffer cmdBuffer) {
    VkBool32 skip_call = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    for (auto cb_image_data : pCB->imageLayoutMap) {
        VkImageLayout imageLayout;
        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image %" PRIu64 ".",
                        reinterpret_cast<const uint64_t &>(cb_image_data.first));
        } else {
            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
                // TODO: Set memory invalid which is in mem_tracker currently
            } else if (imageLayout != cb_image_data.second.initialLayout) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                     VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT,
                                     "DS", "Cannot submit cmd buffer using image with layout %s when "
                                           "first use is %s.",
                                     string_VkImageLayout(imageLayout), string_VkImageLayout(cb_image_data.second.initialLayout));
            }
            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
        }
    }
    return skip_call;
}
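// To satisfy the check above, an application must transition an image to the
// layout a command buffer expects before that buffer is submitted. A minimal
// sketch of such a transition (illustrative only -- 'cmd' and 'image' are
// hypothetical handles; this block is not compiled):
#if 0
static void exampleTransitionToTransferDst(VkCommandBuffer cmd, VkImage image) {
    VkImageMemoryBarrier barrier = {};
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED; // first use; see the UNDEFINED case above
    barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image = image;
    barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    barrier.subresourceRange.levelCount = 1;
    barrier.subresourceRange.layerCount = 1;
    barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, NULL, 0, NULL, 1, &barrier);
}
#endif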
// Track which resources are in flight by atomically incrementing their "in_use" counts
VkBool32 validateAndIncrementResources(layer_data *my_data, GLOBAL_CB_NODE *pCB) {
    VkBool32 skip_call = VK_FALSE;
    for (auto drawDataElement : pCB->drawData) {
        for (auto buffer : drawDataElement.buffers) {
            auto buffer_data = my_data->bufferMap.find(buffer);
            if (buffer_data == my_data->bufferMap.end()) {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
                                     "Cannot submit cmd buffer using deleted buffer %" PRIu64 ".", (uint64_t)(buffer));
            } else {
                buffer_data->second.in_use.fetch_add(1);
            }
        }
    }
    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
            auto setNode = my_data->setMap.find(set);
            if (setNode == my_data->setMap.end()) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            (uint64_t)(set), __LINE__, DRAWSTATE_INVALID_DESCRIPTOR_SET, "DS",
                            "Cannot submit cmd buffer using deleted descriptor set %" PRIu64 ".", (uint64_t)(set));
            } else {
                setNode->second->in_use.fetch_add(1);
            }
        }
    }
    for (auto semaphore : pCB->semaphores) {
        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
        if (semaphoreNode == my_data->semaphoreMap.end()) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                        reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
                        "Cannot submit cmd buffer using deleted semaphore %" PRIu64 ".", reinterpret_cast<uint64_t &>(semaphore));
        } else {
            semaphoreNode->second.in_use.fetch_add(1);
        }
    }
    for (auto event : pCB->events) {
        auto eventNode = my_data->eventMap.find(event);
        if (eventNode == my_data->eventMap.end()) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                        reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
                        "Cannot submit cmd buffer using deleted event %" PRIu64 ".", reinterpret_cast<uint64_t &>(event));
        } else {
            eventNode->second.in_use.fetch_add(1);
        }
    }
    return skip_call;
}
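// The in_use fields bumped above are plain atomic reference counts: each
// queue submission increments them and the fence/queue completion paths
// decrement them (see the decrementResources overloads below). A minimal
// sketch of the pattern in isolation (illustrative only; not compiled):
#if 0
#include <atomic>
struct ExampleTrackedResource {
    std::atomic<uint32_t> in_use{0};
};
static void exampleSubmit(ExampleTrackedResource &res) { res.in_use.fetch_add(1); }
static void exampleRetire(ExampleTrackedResource &res) { res.in_use.fetch_sub(1); }
static bool exampleSafeToDestroy(const ExampleTrackedResource &res) { return res.in_use.load() == 0; }
#endif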

void decrementResources(layer_data *my_data, VkCommandBuffer cmdBuffer) {
    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
    for (auto drawDataElement : pCB->drawData) {
        for (auto buffer : drawDataElement.buffers) {
            auto buffer_data = my_data->bufferMap.find(buffer);
            if (buffer_data != my_data->bufferMap.end()) {
                buffer_data->second.in_use.fetch_sub(1);
            }
        }
    }
    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
            auto setNode = my_data->setMap.find(set);
            if (setNode != my_data->setMap.end()) {
                setNode->second->in_use.fetch_sub(1);
            }
        }
    }
    for (auto semaphore : pCB->semaphores) {
        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
        if (semaphoreNode != my_data->semaphoreMap.end()) {
            semaphoreNode->second.in_use.fetch_sub(1);
        }
    }
    for (auto event : pCB->events) {
        auto eventNode = my_data->eventMap.find(event);
        if (eventNode != my_data->eventMap.end()) {
            eventNode->second.in_use.fetch_sub(1);
        }
    }
    for (auto queryStatePair : pCB->queryToStateMap) {
        my_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
    }
    for (auto eventStagePair : pCB->eventToStageMap) {
        my_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
    }
}

void decrementResources(layer_data *my_data, uint32_t fenceCount, const VkFence *pFences) {
    for (uint32_t i = 0; i < fenceCount; ++i) {
        auto fence_data = my_data->fenceMap.find(pFences[i]);
        if (fence_data == my_data->fenceMap.end() || !fence_data->second.needsSignaled)
            return;
        fence_data->second.needsSignaled = false;
        fence_data->second.in_use.fetch_sub(1);
        decrementResources(my_data, fence_data->second.priorFences.size(), fence_data->second.priorFences.data());
        for (auto cmdBuffer : fence_data->second.cmdBuffers) {
            decrementResources(my_data, cmdBuffer);
        }
    }
}

void decrementResources(layer_data *my_data, VkQueue queue) {
    auto queue_data = my_data->queueMap.find(queue);
    if (queue_data != my_data->queueMap.end()) {
        for (auto cmdBuffer : queue_data->second.untrackedCmdBuffers) {
            decrementResources(my_data, cmdBuffer);
        }
        queue_data->second.untrackedCmdBuffers.clear();
        decrementResources(my_data, queue_data->second.lastFences.size(), queue_data->second.lastFences.data());
    }
}

void updateTrackedCommandBuffers(layer_data *dev_data, VkQueue queue, VkQueue other_queue, VkFence fence) {
    if (queue == other_queue) {
        return;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    auto other_queue_data = dev_data->queueMap.find(other_queue);
    if (queue_data == dev_data->queueMap.end() || other_queue_data == dev_data->queueMap.end()) {
        return;
    }
    for (auto prior_fence : other_queue_data->second.lastFences) {
        queue_data->second.lastFences.push_back(prior_fence);
    }
    if (fence != VK_NULL_HANDLE) {
        auto fence_data = dev_data->fenceMap.find(fence);
        if (fence_data == dev_data->fenceMap.end()) {
            return;
        }
        for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) {
            fence_data->second.cmdBuffers.push_back(cmdbuffer);
        }
        other_queue_data->second.untrackedCmdBuffers.clear();
    } else {
        for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) {
            queue_data->second.untrackedCmdBuffers.push_back(cmdbuffer);
        }
        other_queue_data->second.untrackedCmdBuffers.clear();
    }
    for (auto eventStagePair : other_queue_data->second.eventToStageMap) {
        queue_data->second.eventToStageMap[eventStagePair.first] = eventStagePair.second;
    }
}

void trackCommandBuffers(layer_data *my_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
    auto queue_data = my_data->queueMap.find(queue);
    if (fence != VK_NULL_HANDLE) {
        vector<VkFence> prior_fences;
        auto fence_data = my_data->fenceMap.find(fence);
        if (fence_data == my_data->fenceMap.end()) {
            return;
        }
        if (queue_data != my_data->queueMap.end()) {
            prior_fences = queue_data->second.lastFences;
            queue_data->second.lastFences.clear();
            queue_data->second.lastFences.push_back(fence);
            for (auto cmdbuffer : queue_data->second.untrackedCmdBuffers) {
                fence_data->second.cmdBuffers.push_back(cmdbuffer);
            }
            queue_data->second.untrackedCmdBuffers.clear();
        }
        fence_data->second.cmdBuffers.clear();
        fence_data->second.priorFences = prior_fences;
        fence_data->second.needsSignaled = true;
        fence_data->second.queue = queue;
        fence_data->second.in_use.fetch_add(1);
        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
            const VkSubmitInfo *submit = &pSubmits[submit_idx];
            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
                for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
                    fence_data->second.cmdBuffers.push_back(secondaryCmdBuffer);
                }
                fence_data->second.cmdBuffers.push_back(submit->pCommandBuffers[i]);
            }
        }
    } else {
        if (queue_data != my_data->queueMap.end()) {
            for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
                const VkSubmitInfo *submit = &pSubmits[submit_idx];
                for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
                    for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
                        queue_data->second.untrackedCmdBuffers.push_back(secondaryCmdBuffer);
                    }
                    queue_data->second.untrackedCmdBuffers.push_back(submit->pCommandBuffers[i]);
                }
            }
        }
    }
    if (queue_data != my_data->queueMap.end()) {
        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
            const VkSubmitInfo *submit = &pSubmits[submit_idx];
            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
                // Add cmdBuffers to both the global set and the queue set
                for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
                    my_data->globalInFlightCmdBuffers.insert(secondaryCmdBuffer);
                    queue_data->second.inFlightCmdBuffers.insert(secondaryCmdBuffer);
                }
                my_data->globalInFlightCmdBuffers.insert(submit->pCommandBuffers[i]);
                queue_data->second.inFlightCmdBuffers.insert(submit->pCommandBuffers[i]);
            }
        }
    }
}

bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skip_call = false;
    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_INVALID_FENCE, "DS", "Command Buffer %#" PRIx64 " is already in use and is not marked "
                                                             "for simultaneous use.",
                    reinterpret_cast<uint64_t>(pCB->commandBuffer));
    }
    return skip_call;
}
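// An application that legitimately needs a command buffer in flight on more
// than one submission at once avoids the error above by recording it with the
// simultaneous-use flag. A minimal sketch (illustrative only -- 'cmd' is a
// hypothetical handle; this block is not compiled):
#if 0
static VkResult exampleBeginForSimultaneousUse(VkCommandBuffer cmd) {
    VkCommandBufferBeginInfo begin_info = {};
    begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    begin_info.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT; // checked by the layer above
    return vkBeginCommandBuffer(cmd, &begin_info);
}
#endif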

static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skipCall = false;
    // Validate that cmd buffers have been updated
    if (CB_RECORDED != pCB->state) {
        if (CB_INVALID == pCB->state) {
            // Inform the app of the reason the CB is invalid
            bool causeReported = false;
            if (!pCB->destroyedSets.empty()) {
                std::stringstream set_string;
                for (auto set : pCB->destroyedSets)
                    set_string << " " << set;

                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "You are submitting command buffer %#" PRIxLEAST64
                            " that is invalid because it had the following bound descriptor set(s) destroyed: %s",
                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
                causeReported = true;
            }
            if (!pCB->updatedSets.empty()) {
                std::stringstream set_string;
                for (auto set : pCB->updatedSets)
                    set_string << " " << set;

                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "You are submitting command buffer %#" PRIxLEAST64
                            " that is invalid because it had the following bound descriptor set(s) updated: %s",
                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
                causeReported = true;
            }
            if (!pCB->destroyedFramebuffers.empty()) {
                std::stringstream fb_string;
                for (auto fb : pCB->destroyedFramebuffers)
                    fb_string << " " << fb;

                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "You are submitting command buffer %#" PRIxLEAST64 " that is invalid because it had the following "
                            "referenced framebuffers destroyed: %s",
                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), fb_string.str().c_str());
                causeReported = true;
            }
            // TODO : This is defensive programming to make sure an error is
            // flagged if we hit this INVALID cmd buffer case and none of the
            // above cases are hit. As the number of INVALID cases grows, this
            // code should be updated to seamlessly handle all the cases.
            if (!causeReported) {
                skipCall |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                    "You are submitting command buffer %#" PRIxLEAST64 " that is invalid due to an unknown cause. Validation "
                    "should be improved to report the exact cause.",
                    reinterpret_cast<uint64_t &>(pCB->commandBuffer));
            }
        } else { // Flag an error for using a CB without vkEndCommandBuffer() having been called
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
                        "You must call vkEndCommandBuffer() on CB %#" PRIxLEAST64 " before this call to vkQueueSubmit()!",
                        (uint64_t)(pCB->commandBuffer));
        }
    }
    return skipCall;
}

static VkBool32 validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    // Track in-use for resources off of primary and any secondary CBs
    VkBool32 skipCall = validateAndIncrementResources(dev_data, pCB);
    if (!pCB->secondaryCommandBuffers.empty()) {
        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
            skipCall |= validateAndIncrementResources(dev_data, dev_data->commandBufferMap[secondaryCmdBuffer]);
            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
            if (pSubCB->primaryCommandBuffer != pCB->commandBuffer) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
                        "CB %#" PRIxLEAST64 " was submitted with secondary buffer %#" PRIxLEAST64
                        " but that buffer has subsequently been bound to "
                        "primary cmd buffer %#" PRIxLEAST64 ".",
                        reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer),
                        reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer));
            }
        }
    }
    // TODO : Verify whether this also needs to be checked for secondary command
    // buffers. If so, this block of code can move to the
    // validateCommandBufferState() function. Vulkan issue GL106 filed to clarify.
    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                            __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
                            "CB %#" PRIxLEAST64 " was begun with VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
                            "set, but has been submitted %#" PRIxLEAST64 " times.",
                            (uint64_t)(pCB->commandBuffer), pCB->submitCount);
    }
    skipCall |= validateCommandBufferState(dev_data, pCB);
    // If USAGE_SIMULTANEOUS_USE_BIT is not set then the CB cannot already be
    // executing on the device
    skipCall |= validateCommandBufferSimultaneousUse(dev_data, pCB);
    return skipCall;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
    VkBool32 skipCall = VK_FALSE;
    GLOBAL_CB_NODE *pCBNode = NULL;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    // TODO : Need to track fence and clear mem references when fence clears
    // MTMTODO : Merge this code with the code below to avoid duplicating efforts
    uint64_t fenceId = 0;
    skipCall = add_fence_info(dev_data, fence, queue, &fenceId);

    print_mem_list(dev_data, queue);
    printCBList(dev_data, queue);
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        const VkSubmitInfo *submit = &pSubmits[submit_idx];
        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
            pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
            if (pCBNode) {
                pCBNode->fenceId = fenceId;
                pCBNode->lastSubmittedFence = fence;
                pCBNode->lastSubmittedQueue = queue;
                for (auto &function : pCBNode->validate_functions) {
                    skipCall |= function();
                }
                for (auto &function : pCBNode->eventUpdates) {
                    skipCall |= static_cast<VkBool32>(function(queue));
                }
            }
        }

        for (uint32_t i = 0; i < submit->waitSemaphoreCount; i++) {
            VkSemaphore sem = submit->pWaitSemaphores[i];

            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_SIGNALLED) {
                    skipCall |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
                                "vkQueueSubmit: Semaphore must be in signaled state before being passed in pWaitSemaphores");
                }
                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_WAIT;
            }
        }
        for (uint32_t i = 0; i < submit->signalSemaphoreCount; i++) {
            VkSemaphore sem = submit->pSignalSemaphores[i];

            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_UNSET) {
                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, (uint64_t)sem, __LINE__, MEMTRACK_NONE,
                                        "SEMAPHORE", "vkQueueSubmit: Semaphore must not be currently signaled or in a wait state");
                }
                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
            }
        }
    }
#endif
    // First verify that the fence is not already in use
    if ((fence != VK_NULL_HANDLE) && (submitCount != 0) && dev_data->fenceMap[fence].in_use.load()) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                            "Fence %#" PRIx64 " is already in use by another submission.", (uint64_t)(fence));
    }
    // Now verify each individual submit
    std::unordered_set<VkQueue> processed_other_queues;
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        const VkSubmitInfo *submit = &pSubmits[submit_idx];
        vector<VkSemaphore> semaphoreList;
        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
            const VkSemaphore &semaphore = submit->pWaitSemaphores[i];
            semaphoreList.push_back(semaphore);
            if (dev_data->semaphoreMap[semaphore].signaled) {
                dev_data->semaphoreMap[semaphore].signaled = 0;
            } else {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS,
                                    "DS", "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
                                    reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
            }
            const VkQueue &other_queue = dev_data->semaphoreMap[semaphore].queue;
            if (other_queue != VK_NULL_HANDLE && !processed_other_queues.count(other_queue)) {
                updateTrackedCommandBuffers(dev_data, queue, other_queue, fence);
                processed_other_queues.insert(other_queue);
            }
        }
        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
            const VkSemaphore &semaphore = submit->pSignalSemaphores[i];
            semaphoreList.push_back(semaphore);
            if (dev_data->semaphoreMap[semaphore].signaled) {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS,
                                    "DS", "Queue %#" PRIx64 " is signaling semaphore %#" PRIx64
                                          " that has already been signaled but not waited on by queue %#" PRIx64 ".",
                                    reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore),
                                    reinterpret_cast<uint64_t &>(dev_data->semaphoreMap[semaphore].queue));
            } else {
                dev_data->semaphoreMap[semaphore].signaled = 1;
                dev_data->semaphoreMap[semaphore].queue = queue;
            }
        }
        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
            skipCall |= ValidateCmdBufImageLayouts(submit->pCommandBuffers[i]);
            pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
            pCBNode->semaphores = semaphoreList;
            pCBNode->submitCount++; // increment submit count
            skipCall |= validatePrimaryCommandBufferState(dev_data, pCBNode);
        }
    }
    // Update cmdBuffer-related data structs and mark the fence in-use
    trackCommandBuffers(dev_data, queue, submitCount, pSubmits, fence);
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        result = dev_data->device_dispatch_table->QueueSubmit(queue, submitCount, pSubmits, fence);
#if MTMERGESOURCE
    loader_platform_thread_lock_mutex(&globalLock);
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        const VkSubmitInfo *submit = &pSubmits[submit_idx];
        for (uint32_t i = 0; i < submit->waitSemaphoreCount; i++) {
            VkSemaphore sem = submit->pWaitSemaphores[i];

            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_UNSET;
            }
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
#endif
    return result;
}
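// The forward-progress checks above amount to a simple rule: every semaphore
// named in pWaitSemaphores must have a pending (unconsumed) signal from an
// earlier pSignalSemaphores entry. A minimal two-submit sketch that satisfies
// this pairing (illustrative only -- the handles are hypothetical; this block
// is not compiled):
#if 0
static void exampleSignalThenWait(VkQueue q, VkCommandBuffer cmd0, VkCommandBuffer cmd1, VkSemaphore sem) {
    VkPipelineStageFlags wait_stage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    VkSubmitInfo submits[2] = {};
    submits[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submits[0].commandBufferCount = 1;
    submits[0].pCommandBuffers = &cmd0;
    submits[0].signalSemaphoreCount = 1;
    submits[0].pSignalSemaphores = &sem; // marks the semaphore signaled in the layer's map
    submits[1].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submits[1].waitSemaphoreCount = 1;
    submits[1].pWaitSemaphores = &sem;   // consumes that signal; no forward-progress error
    submits[1].pWaitDstStageMask = &wait_stage;
    submits[1].commandBufferCount = 1;
    submits[1].pCommandBuffers = &cmd1;
    vkQueueSubmit(q, 2, submits, VK_NULL_HANDLE);
}
#endif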

#if MTMERGESOURCE
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
                                                                const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = my_data->device_dispatch_table->AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
    // TODO : Track allocations and overall size here
    loader_platform_thread_lock_mutex(&globalLock);
    add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo);
    print_mem_list(my_data, device);
    loader_platform_thread_unlock_mutex(&globalLock);
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    // From the spec: A memory object is freed by calling vkFreeMemory() when it is no longer needed.
    // Before freeing a memory object, an application must ensure the memory object is no longer
    // in use by the device, for example by command buffers queued for execution. The memory need
    // not yet be unbound from all images and buffers, but any further use of those images or
    // buffers (on host or device) for anything other than destroying those objects will result in
    // undefined behavior.

    loader_platform_thread_lock_mutex(&globalLock);
    freeMemObjInfo(my_data, device, mem, VK_FALSE);
    print_mem_list(my_data, device);
    printCBList(my_data, device);
    loader_platform_thread_unlock_mutex(&globalLock);
    my_data->device_dispatch_table->FreeMemory(device, mem, pAllocator);
}

VkBool32 validateMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
    VkBool32 skipCall = VK_FALSE;

    if (size == 0) {
        // TODO: a size of 0 is not listed as an invalid use in the spec, should it be?
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                           "VkMapMemory: Attempting to map memory range of size zero");
    }

    auto mem_element = my_data->memObjMap.find(mem);
    if (mem_element != my_data->memObjMap.end()) {
        // It is an application error to call vkMapMemory on an object that is already mapped
        if (mem_element->second.memRange.size != 0) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                                "VkMapMemory: Attempting to map memory on an already-mapped object %#" PRIxLEAST64, (uint64_t)mem);
        }

        // Validate that offset + size is within the object's allocationSize
        if (size == VK_WHOLE_SIZE) {
            if (offset >= mem_element->second.allocInfo.allocationSize) {
                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
                                    "MEM", "Mapping Memory from %" PRIu64 " to %" PRIu64 " with total memory size %" PRIu64, offset,
                                    mem_element->second.allocInfo.allocationSize, mem_element->second.allocInfo.allocationSize);
            }
        } else {
            if ((offset + size) > mem_element->second.allocInfo.allocationSize) {
                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
                                    "MEM", "Mapping Memory from %" PRIu64 " to %" PRIu64 " with total memory size %" PRIu64, offset,
                                    size + offset, mem_element->second.allocInfo.allocationSize);
            }
        }
    }
    return skipCall;
}
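// A mapping that passes all of the checks above stays inside the allocation
// and targets memory that is not already mapped. A minimal sketch
// (illustrative only -- 'device' and 'mem' are hypothetical handles for a
// host-visible allocation of at least 256 bytes; this block is not compiled):
#if 0
static VkResult exampleMapWholeRange(VkDevice device, VkDeviceMemory mem) {
    void *pData = NULL;
    // offset 0 with VK_WHOLE_SIZE maps the entire allocation; the offset must
    // still be strictly less than allocationSize, per the check above
    VkResult result = vkMapMemory(device, mem, 0, VK_WHOLE_SIZE, 0, &pData);
    if (result == VK_SUCCESS) {
        memset(pData, 0, 256); // write within the mapped range only
        vkUnmapMemory(device, mem);
    }
    return result;
}
#endif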

void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
    auto mem_element = my_data->memObjMap.find(mem);
    if (mem_element != my_data->memObjMap.end()) {
        MemRange new_range;
        new_range.offset = offset;
        new_range.size = size;
        mem_element->second.memRange = new_range;
    }
}

VkBool32 deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
    VkBool32 skipCall = VK_FALSE;
    auto mem_element = my_data->memObjMap.find(mem);
    if (mem_element != my_data->memObjMap.end()) {
        if (!mem_element->second.memRange.size) {
            // Valid Usage: memory must currently be mapped
            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                               "Unmapping Memory without memory being mapped: mem obj %#" PRIxLEAST64, (uint64_t)mem);
        }
        mem_element->second.memRange.size = 0;
        if (mem_element->second.pData) {
            free(mem_element->second.pData);
            mem_element->second.pData = 0;
        }
    }
    return skipCall;
}

static char NoncoherentMemoryFillValue = 0xb;

void initializeAndTrackMemory(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize size, void **ppData) {
    auto mem_element = my_data->memObjMap.find(mem);
    if (mem_element != my_data->memObjMap.end()) {
        mem_element->second.pDriverData = *ppData;
        uint32_t index = mem_element->second.allocInfo.memoryTypeIndex;
        if (memProps.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
            mem_element->second.pData = 0;
        } else {
            if (size == VK_WHOLE_SIZE) {
                size = mem_element->second.allocInfo.allocationSize;
            }
            size_t convSize = (size_t)(size);
            mem_element->second.pData = malloc(2 * convSize);
            memset(mem_element->second.pData, NoncoherentMemoryFillValue, 2 * convSize);
            *ppData = static_cast<char *>(mem_element->second.pData) + (convSize / 2);
        }
    }
}
#endif
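// initializeAndTrackMemory() above appears to implement a guard-band scheme
// for non-coherent mappings: it allocates twice the mapped size, fills the
// whole block with NoncoherentMemoryFillValue, and hands the application a
// pointer offset halfway into the block, so out-of-bounds writes could later
// be detected by checking whether the fill bytes survived. A minimal sketch
// of that detection idea (illustrative only -- exampleCheckGuardBands is a
// hypothetical helper; this block is not compiled):
#if 0
static bool exampleCheckGuardBands(const char *block, size_t convSize) {
    // Layout: [guard: convSize/2][user data: convSize][guard: convSize/2]
    for (size_t i = 0; i < convSize / 2; ++i) {
        if (block[i] != NoncoherentMemoryFillValue)
            return false; // application underran the mapped range
    }
    for (size_t i = convSize / 2 + convSize; i < 2 * convSize; ++i) {
        if (block[i] != NoncoherentMemoryFillValue)
            return false; // application overran the mapped range
    }
    return true;
}
#endif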
// Note: This function assumes that the global lock is held by the calling
// thread.
VkBool32 cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) {
    VkBool32 skip_call = VK_FALSE;
    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
    if (pCB) {
        for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) {
            for (auto event : queryEventsPair.second) {
                if (my_data->eventMap[event].needsSignaled) {
                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                         "Cannot get query results on queryPool %" PRIu64
                                         " with index %d which was guarded by unsignaled event %" PRIu64 ".",
                                         (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
                }
            }
        }
    }
    return skip_call;
}
// Remove the given cmd_buffer from the global inFlight set.
// Also, if the given queue is valid, remove the cmd_buffer from that queue's
// inFlightCmdBuffer set. Finally, check all other queues; if the given cmd_buffer
// is still in flight on another queue, add it back into the global set.
// Note: This function assumes that the global lock is held by the calling
// thread.
static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkQueue queue) {
    // Pull it off of the global list initially, but if we find it in any other queue list, add it back in
    dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
    if (dev_data->queueMap.find(queue) != dev_data->queueMap.end()) {
        dev_data->queueMap[queue].inFlightCmdBuffers.erase(cmd_buffer);
        for (auto q : dev_data->queues) {
            if ((q != queue) &&
                (dev_data->queueMap[q].inFlightCmdBuffers.find(cmd_buffer) != dev_data->queueMap[q].inFlightCmdBuffers.end())) {
                dev_data->globalInFlightCmdBuffers.insert(cmd_buffer);
                break;
            }
        }
    }
}
#if MTMERGESOURCE
static inline bool verifyFenceStatus(VkDevice device, VkFence fence, const char *apiCall) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skipCall = false;
    auto pFenceInfo = my_data->fenceMap.find(fence);
    if (pFenceInfo != my_data->fenceMap.end()) {
        if (pFenceInfo->second.firstTimeFlag != VK_TRUE) {
            if (pFenceInfo->second.createInfo.flags & VK_FENCE_CREATE_SIGNALED_BIT) {
                skipCall |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                            "%s specified fence %#" PRIxLEAST64 " already in SIGNALED state.", apiCall, (uint64_t)fence);
            }
            if (!pFenceInfo->second.queue && !pFenceInfo->second.swapchain) { // Checking the status of an unsubmitted fence
                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                    reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                                    "%s called for fence %#" PRIxLEAST64 " which has not been submitted on a Queue or during "
                                    "acquire next image.",
                                    apiCall, reinterpret_cast<uint64_t &>(fence));
            }
        } else {
            pFenceInfo->second.firstTimeFlag = VK_FALSE;
        }
    }
    return skipCall;
}
#endif
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkBool32 skip_call = VK_FALSE;
#if MTMERGESOURCE
    // Verify fence status of submitted fences
    loader_platform_thread_lock_mutex(&globalLock);
    for (uint32_t i = 0; i < fenceCount; i++) {
        skip_call |= verifyFenceStatus(device, pFences[i], "vkWaitForFences");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
#endif
    VkResult result = dev_data->device_dispatch_table->WaitForFences(device, fenceCount, pFences, waitAll, timeout);

    if (result == VK_SUCCESS) {
        loader_platform_thread_lock_mutex(&globalLock);
        // When we know that all fences are complete we can clean/remove their CBs
        if (waitAll || fenceCount == 1) {
            for (uint32_t i = 0; i < fenceCount; ++i) {
#if MTMERGESOURCE
                update_fence_tracking(dev_data, pFences[i]);
#endif
                VkQueue fence_queue = dev_data->fenceMap[pFences[i]].queue;
                for (auto cmdBuffer : dev_data->fenceMap[pFences[i]].cmdBuffers) {
                    skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
                    removeInFlightCmdBuffer(dev_data, cmdBuffer, fence_queue);
                }
            }
            decrementResources(dev_data, fenceCount, pFences);
        }
        // NOTE : The alternate case, where only some fences have completed, is not handled here. In
        // that case, for the app to guarantee which fences completed, it will have to call
        // vkGetFenceStatus(), at which point we'll clean/remove their CBs if complete.
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    if (VK_FALSE != skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    return result;
}
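// The cleanup above only runs when *all* waited fences are known complete,
// which is guaranteed when waitAll is VK_TRUE (or when only one fence is
// waited on). A minimal sketch of a wait that takes that path (illustrative
// only -- the handles are hypothetical; this block is not compiled):
#if 0
static VkResult exampleWaitAll(VkDevice device, VkFence fences[2]) {
    // waitAll = VK_TRUE: returns VK_SUCCESS only once both fences signal,
    // so every tracked command buffer behind them can be retired
    return vkWaitForFences(device, 2, fences, VK_TRUE, UINT64_MAX);
}
#endif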

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(VkDevice device, VkFence fence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skipCall = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
#if MTMERGESOURCE
    loader_platform_thread_lock_mutex(&globalLock);
    skipCall = verifyFenceStatus(device, fence, "vkGetFenceStatus");
    loader_platform_thread_unlock_mutex(&globalLock);
    if (skipCall)
        return result;
#endif
    result = dev_data->device_dispatch_table->GetFenceStatus(device, fence);
    VkBool32 skip_call = VK_FALSE;
    loader_platform_thread_lock_mutex(&globalLock);
    if (result == VK_SUCCESS) {
#if MTMERGESOURCE
        update_fence_tracking(dev_data, fence);
#endif
        auto fence_queue = dev_data->fenceMap[fence].queue;
        for (auto cmdBuffer : dev_data->fenceMap[fence].cmdBuffers) {
            skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
            removeInFlightCmdBuffer(dev_data, cmdBuffer, fence_queue);
        }
        decrementResources(dev_data, 1, &fence);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE != skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
    loader_platform_thread_lock_mutex(&globalLock);
    dev_data->queues.push_back(*pQueue);
    QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue];
    pQNode->device = device;
#if MTMERGESOURCE
    pQNode->lastRetiredId = 0;
    pQNode->lastSubmittedId = 0;
#endif
    loader_platform_thread_unlock_mutex(&globalLock);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(VkQueue queue) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    decrementResources(dev_data, queue);
    VkBool32 skip_call = VK_FALSE;
    loader_platform_thread_lock_mutex(&globalLock);
    // Iterate over a local copy of the set, since we erase set members as we go in the loop
    auto local_cb_set = dev_data->queueMap[queue].inFlightCmdBuffers;
    for (auto cmdBuffer : local_cb_set) {
        skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
        removeInFlightCmdBuffer(dev_data, cmdBuffer, queue);
    }
    dev_data->queueMap[queue].inFlightCmdBuffers.clear();
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE != skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->QueueWaitIdle(queue);
#if MTMERGESOURCE
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        retire_queue_fences(dev_data, queue);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
#endif
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(VkDevice device) {
    VkBool32 skip_call = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    for (auto queue : dev_data->queues) {
        decrementResources(dev_data, queue);
        if (dev_data->queueMap.find(queue) != dev_data->queueMap.end()) {
            // Clear all of the queue's inFlightCmdBuffers (the global set is cleared below)
            dev_data->queueMap[queue].inFlightCmdBuffers.clear();
        }
    }
    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
        skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
    }
    dev_data->globalInFlightCmdBuffers.clear();
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE != skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->DeviceWaitIdle(device);
#if MTMERGESOURCE
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        retire_device_fences(dev_data, device);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
#endif
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skipCall = false;
    loader_platform_thread_lock_mutex(&globalLock);
    if (dev_data->fenceMap[fence].in_use.load()) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                            "Fence %#" PRIx64 " is in use by a command buffer.", (uint64_t)(fence));
    }
#if MTMERGESOURCE
    delete_fence_info(dev_data, fence);
    auto item = dev_data->fenceMap.find(fence);
    if (item != dev_data->fenceMap.end()) {
        dev_data->fenceMap.erase(item);
    }
#endif
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->DestroyFence(device, fence, pAllocator);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroySemaphore(device, semaphore, pAllocator);
    loader_platform_thread_lock_mutex(&globalLock);
    auto item = dev_data->semaphoreMap.find(semaphore);
    if (item != dev_data->semaphoreMap.end()) {
        if (item->second.in_use.load()) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                    reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
                    "Cannot delete semaphore %" PRIx64 " which is in use.", reinterpret_cast<uint64_t &>(semaphore));
        }
        dev_data->semaphoreMap.erase(semaphore);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    // TODO : Clean up any internal data structures using this obj.
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    loader_platform_thread_lock_mutex(&globalLock);
    auto event_data = dev_data->eventMap.find(event);
    if (event_data != dev_data->eventMap.end()) {
        if (event_data->second.in_use.load()) {
            skip_call |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
                "Cannot delete event %" PRIx64 " which is in use by a command buffer.", reinterpret_cast<uint64_t &>(event));
        }
        dev_data->eventMap.erase(event_data);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skip_call)
        dev_data->device_dispatch_table->DestroyEvent(device, event, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyQueryPool(device, queryPool, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
                                                     uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
                                                     VkQueryResultFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight;
    GLOBAL_CB_NODE *pCB = nullptr;
    loader_platform_thread_lock_mutex(&globalLock);
    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
        pCB = getCBNode(dev_data, cmdBuffer);
        for (auto queryStatePair : pCB->queryToStateMap) {
            queriesInFlight[queryStatePair.first].push_back(cmdBuffer);
        }
    }
    VkBool32 skip_call = VK_FALSE;
    for (uint32_t i = 0; i < queryCount; ++i) {
        QueryObject query = {queryPool, firstQuery + i};
        auto queryElement = queriesInFlight.find(query);
        auto queryToStateElement = dev_data->queryToStateMap.find(query);
        // Available and in flight
        if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
            queryToStateElement->second) {
            for (auto cmdBuffer : queryElement->second) {
                pCB = getCBNode(dev_data, cmdBuffer);
                auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query);
                if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                         "Cannot get query results on queryPool %" PRIu64 " with index %d which is in flight.",
                                         (uint64_t)(queryPool), firstQuery + i);
                } else {
                    for (auto event : queryEventElement->second) {
                        dev_data->eventMap[event].needsSignaled = true;
                    }
                }
            }
            // Unavailable and in flight
        } else if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
                   !queryToStateElement->second) {
            // TODO : Can there be the same query in use by multiple command buffers in flight?
            bool make_available = false;
            for (auto cmdBuffer : queryElement->second) {
                pCB = getCBNode(dev_data, cmdBuffer);
                make_available |= pCB->queryToStateMap[query];
            }
            if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                     "Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.",
                                     (uint64_t)(queryPool), firstQuery + i);
            }
            // Unavailable
        } else if (queryToStateElement != dev_data->queryToStateMap.end() && !queryToStateElement->second) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT,
                                 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                 "Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.",
                                 (uint64_t)(queryPool), firstQuery + i);
            // Uninitialized
        } else if (queryToStateElement == dev_data->queryToStateMap.end()) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT,
                                 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                 "Cannot get query results on queryPool %" PRIu64 " with index %d which is uninitialized.",
                                 (uint64_t)(queryPool), firstQuery + i);
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    return dev_data->device_dispatch_table->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride,
                                                                flags);
}
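// The checks above key off query availability: results may be fetched for a
// query that is unavailable or still in flight only when the call can make it
// available, e.g. via VK_QUERY_RESULT_WAIT_BIT. A minimal sketch of a blocking
// fetch (illustrative only -- the handles are hypothetical; this block is not
// compiled):
#if 0
static VkResult exampleGetSingleQueryResult(VkDevice device, VkQueryPool pool, uint32_t slot, uint64_t *out) {
    // WAIT_BIT blocks until the query becomes available, avoiding the
    // "unavailable" errors flagged above; 64_BIT requests 8-byte results
    return vkGetQueryPoolResults(device, pool, slot, 1, sizeof(uint64_t), out, sizeof(uint64_t),
                                 VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
}
#endif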
5820
VkBool32 validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
    VkBool32 skip_call = VK_FALSE;
    auto buffer_data = my_data->bufferMap.find(buffer);
    if (buffer_data == my_data->bufferMap.end()) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
                             "Cannot free buffer %" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
    } else {
        if (buffer_data->second.in_use.load()) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                 (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
                                 "Cannot free buffer %" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
        }
    }
    return skip_call;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    auto item = dev_data->bufferBindingMap.find((uint64_t)buffer);
    if (item != dev_data->bufferBindingMap.end()) {
        skipCall = clear_object_binding(dev_data, device, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
        dev_data->bufferBindingMap.erase(item);
    }
#endif
    if (!validateIdleBuffer(dev_data, buffer) && (VK_FALSE == skipCall)) {
        loader_platform_thread_unlock_mutex(&globalLock);
        dev_data->device_dispatch_table->DestroyBuffer(device, buffer, pAllocator);
        loader_platform_thread_lock_mutex(&globalLock);
    }
    dev_data->bufferMap.erase(buffer);
    loader_platform_thread_unlock_mutex(&globalLock);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroyBufferView(device, bufferView, pAllocator);
    loader_platform_thread_lock_mutex(&globalLock);
    auto item = dev_data->bufferViewMap.find(bufferView);
    if (item != dev_data->bufferViewMap.end()) {
        dev_data->bufferViewMap.erase(item);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkBool32 skipCall = VK_FALSE;
#if MTMERGESOURCE
    loader_platform_thread_lock_mutex(&globalLock);
    auto item = dev_data->imageBindingMap.find((uint64_t)image);
    if (item != dev_data->imageBindingMap.end()) {
        skipCall = clear_object_binding(dev_data, device, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
        dev_data->imageBindingMap.erase(item);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
#endif
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->DestroyImage(device, image, pAllocator);

    loader_platform_thread_lock_mutex(&globalLock);
    const auto &entry = dev_data->imageMap.find(image);
    if (entry != dev_data->imageMap.end()) {
        // Clear any memory mapping for this image
        const auto &mem_entry = dev_data->memObjMap.find(entry->second.mem);
        if (mem_entry != dev_data->memObjMap.end())
            mem_entry->second.image = VK_NULL_HANDLE;

        // Remove image from imageMap
        dev_data->imageMap.erase(entry);
    }
    const auto &subEntry = dev_data->imageSubresourceMap.find(image);
    if (subEntry != dev_data->imageSubresourceMap.end()) {
        for (const auto &pair : subEntry->second) {
            dev_data->imageLayoutMap.erase(pair);
        }
        dev_data->imageSubresourceMap.erase(subEntry);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
}
#if MTMERGESOURCE
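// Reports a buffer/image aliasing violation. The wording depends on object_type:
// when the offending object is a buffer, the "other" handle is an image, and
// vice versa.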
VkBool32 print_memory_range_error(layer_data *dev_data, const uint64_t object_handle, const uint64_t other_handle,
                                  VkDebugReportObjectTypeEXT object_type) {
    if (object_type == VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT) {
        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0,
                       MEMTRACK_INVALID_ALIASING, "MEM", "Buffer %" PRIx64 " is aliased with image %" PRIx64, object_handle,
                       other_handle);
    } else {
        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0,
                       MEMTRACK_INVALID_ALIASING, "MEM", "Image %" PRIx64 " is aliased with buffer %" PRIx64, object_handle,
                       other_handle);
    }
}

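// Flags new_range if it overlaps any existing range after both are rounded out to
// bufferImageGranularity "pages" (the mask drops the low bits of start/end).
// For example, with bufferImageGranularity = 0x400, ranges [0x000, 0x3FF] and
// [0x400, 0x7FF] land on different pages and do not alias, while [0x000, 0x401]
// and [0x400, 0x7FF] share the page at 0x400 and are reported.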
VkBool32 validate_memory_range(layer_data *dev_data, const vector<MEMORY_RANGE> &ranges, const MEMORY_RANGE &new_range,
                               VkDebugReportObjectTypeEXT object_type) {
    VkBool32 skip_call = false;

    for (auto range : ranges) {
        if ((range.end & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)) <
            (new_range.start & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)))
            continue;
        if ((range.start & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)) >
            (new_range.end & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)))
            continue;
        skip_call |= print_memory_range_error(dev_data, new_range.handle, range.handle, object_type);
    }
    return skip_call;
}

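// Records the new object's [memoryOffset, memoryOffset + size - 1] range in `ranges`
// (its own resource type) and validates it against `other_ranges`, the ranges of the
// opposite resource type bound to the same VkDeviceMemory.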
VkBool32 validate_buffer_image_aliasing(layer_data *dev_data, uint64_t handle, VkDeviceMemory mem, VkDeviceSize memoryOffset,
                                        VkMemoryRequirements memRequirements, vector<MEMORY_RANGE> &ranges,
                                        const vector<MEMORY_RANGE> &other_ranges, VkDebugReportObjectTypeEXT object_type) {
    MEMORY_RANGE range;
    range.handle = handle;
    range.memory = mem;
    range.start = memoryOffset;
    range.end = memoryOffset + memRequirements.size - 1;
    ranges.push_back(range);
    return validate_memory_range(dev_data, other_ranges, range, object_type);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    loader_platform_thread_lock_mutex(&globalLock);
    // Track objects tied to memory
    uint64_t buffer_handle = (uint64_t)(buffer);
    VkBool32 skipCall =
        set_mem_binding(dev_data, device, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
    add_object_binding_info(dev_data, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, mem);
    {
        VkMemoryRequirements memRequirements;
        // MTMTODO : Shouldn't this call down the chain?
        vkGetBufferMemoryRequirements(device, buffer, &memRequirements);
        skipCall |= validate_buffer_image_aliasing(dev_data, buffer_handle, mem, memoryOffset, memRequirements,
                                                   dev_data->memObjMap[mem].bufferRanges, dev_data->memObjMap[mem].imageRanges,
                                                   VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
    }
    print_mem_list(dev_data, device);
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall) {
        result = dev_data->device_dispatch_table->BindBufferMemory(device, buffer, mem, memoryOffset);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkGetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // TODO : What to track here?
    // Could potentially save returned mem requirements and validate values passed into BindBufferMemory
    my_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkGetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // TODO : What to track here?
    // Could potentially save returned mem requirements and validate values passed into BindImageMemory
    my_data->device_dispatch_table->GetImageMemoryRequirements(device, image, pMemoryRequirements);
}
#endif
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyImageView(device, imageView, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    loader_platform_thread_lock_mutex(&globalLock);

    my_data->shaderModuleMap.erase(shaderModule);

    loader_platform_thread_unlock_mutex(&globalLock);

    my_data->device_dispatch_table->DestroyShaderModule(device, shaderModule, pAllocator);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyPipeline(device, pipeline, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyPipelineLayout(device, pipelineLayout, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroySampler(device, sampler, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyDescriptorPool(device, descriptorPool, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    bool skip_call = false;
    loader_platform_thread_lock_mutex(&globalLock);
    for (uint32_t i = 0; i < commandBufferCount; i++) {
#if MTMERGESOURCE
        clear_cmd_buf_and_mem_references(dev_data, pCommandBuffers[i]);
#endif
        if (dev_data->globalInFlightCmdBuffers.count(pCommandBuffers[i])) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
                        "Attempt to free command buffer (%#" PRIxLEAST64 ") which is in use.",
                        reinterpret_cast<uint64_t>(pCommandBuffers[i]));
        }
        // Delete CB information structure, and remove from commandBufferMap
        auto cb = dev_data->commandBufferMap.find(pCommandBuffers[i]);
        if (cb != dev_data->commandBufferMap.end()) {
            // reset prior to delete for data clean-up
            resetCB(dev_data, (*cb).second->commandBuffer);
            delete (*cb).second;
            dev_data->commandBufferMap.erase(cb);
        }

        // Remove commandBuffer reference from commandPoolMap
        dev_data->commandPoolMap[commandPool].commandBuffers.remove(pCommandBuffers[i]);
    }
#if MTMERGESOURCE
    printCBList(dev_data, device);
#endif
    loader_platform_thread_unlock_mutex(&globalLock);

    if (!skip_call)
        dev_data->device_dispatch_table->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
                                                                   const VkAllocationCallbacks *pAllocator,
                                                                   VkCommandPool *pCommandPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);

    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
                                                                 const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
    if (result == VK_SUCCESS) {
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->queryPoolMap[*pQueryPool].createInfo = *pCreateInfo;
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

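// Returns VK_TRUE if any command buffer allocated from commandPool is still tracked
// as in-flight, which makes resetting or destroying the pool invalid.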
VkBool32 validateCommandBuffersNotInUse(const layer_data *dev_data, VkCommandPool commandPool) {
    VkBool32 skipCall = VK_FALSE;
    auto pool_data = dev_data->commandPoolMap.find(commandPool);
    if (pool_data != dev_data->commandPoolMap.end()) {
        for (auto cmdBuffer : pool_data->second.commandBuffers) {
            if (dev_data->globalInFlightCmdBuffers.count(cmdBuffer)) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT,
                            (uint64_t)(commandPool), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
                            "Cannot reset command pool %" PRIx64 " when allocated command buffer %" PRIx64 " is in use.",
                            (uint64_t)(commandPool), (uint64_t)(cmdBuffer));
            }
        }
    }
    return skipCall;
}

// Destroy commandPool along with all of the commandBuffers allocated from that pool
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool commandBufferComplete = false;
    bool skipCall = false;
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    // Verify that command buffers in pool are complete (not in-flight)
    // MTMTODO : Merge this with code below (separate *NotInUse() call)
    for (auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
         it != dev_data->commandPoolMap[commandPool].commandBuffers.end(); it++) {
        commandBufferComplete = VK_FALSE;
        skipCall = checkCBCompleted(dev_data, *it, &commandBufferComplete);
        if (VK_FALSE == commandBufferComplete) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                (uint64_t)(*it), __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
                                "Destroying Command Pool 0x%" PRIxLEAST64 " before "
                                "its command buffer (0x%" PRIxLEAST64 ") has completed.",
                                (uint64_t)(commandPool), reinterpret_cast<uint64_t>(*it));
        }
    }
#endif
    // Must remove cmdpool from commandPoolMap, after removing all cmdbuffers in its list from the commandBufferMap
    if (dev_data->commandPoolMap.find(commandPool) != dev_data->commandPoolMap.end()) {
        for (auto poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
             poolCb != dev_data->commandPoolMap[commandPool].commandBuffers.end();) {
            auto del_cb = dev_data->commandBufferMap.find(*poolCb);
            delete (*del_cb).second;                  // delete CB info structure
            dev_data->commandBufferMap.erase(del_cb); // Remove this command buffer
            poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.erase(
                poolCb); // Remove CB reference from commandPoolMap's list
        }
    }
    dev_data->commandPoolMap.erase(commandPool);

    loader_platform_thread_unlock_mutex(&globalLock);

    if (VK_TRUE == validateCommandBuffersNotInUse(dev_data, commandPool))
        return;

    if (!skipCall)
        dev_data->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator);
#if MTMERGESOURCE
    loader_platform_thread_lock_mutex(&globalLock);
    auto item = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
    // Remove command buffers from command buffer map
    while (item != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
        auto del_item = item++;
        delete_cmd_buf_info(dev_data, commandPool, *del_item);
    }
    dev_data->commandPoolMap.erase(commandPool);
    loader_platform_thread_unlock_mutex(&globalLock);
#endif
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool commandBufferComplete = false;
    bool skipCall = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
#if MTMERGESOURCE
    // MTMTODO : Merge this with *NotInUse() call below
    loader_platform_thread_lock_mutex(&globalLock);
    auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
    // Verify that CB's in pool are complete (not in-flight)
    while (it != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
        skipCall = checkCBCompleted(dev_data, (*it), &commandBufferComplete);
        if (!commandBufferComplete) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                (uint64_t)(*it), __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
                                "Resetting CB %p before it has completed. You must check CB "
                                "flag before calling vkResetCommandBuffer().",
                                (*it));
        } else {
            // Clear memory references at this point.
            clear_cmd_buf_and_mem_references(dev_data, (*it));
        }
        ++it;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
#endif
    if (VK_TRUE == validateCommandBuffersNotInUse(dev_data, commandPool))
        return VK_ERROR_VALIDATION_FAILED_EXT;

    if (!skipCall)
        result = dev_data->device_dispatch_table->ResetCommandPool(device, commandPool, flags);

    // Reset all of the CBs allocated from this pool
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
        while (it != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
            resetCB(dev_data, (*it));
            ++it;
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

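// vkResetFences performs two checks per fence: warn when a fence tracked as already
// unsignaled (via its SIGNALED flag bit) is reset again, and refuse the reset when
// the fence is still in use by a command buffer submission.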
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skipCall = false;
    loader_platform_thread_lock_mutex(&globalLock);
    for (uint32_t i = 0; i < fenceCount; ++i) {
#if MTMERGESOURCE
        // Reset fence state in fenceCreateInfo structure
        // MTMTODO : Merge with code below
        auto fence_item = dev_data->fenceMap.find(pFences[i]);
        if (fence_item != dev_data->fenceMap.end()) {
            // Validate fences in SIGNALED state
            if (!(fence_item->second.createInfo.flags & VK_FENCE_CREATE_SIGNALED_BIT)) {
                // TODO: I don't see a Valid Usage section for ResetFences. This behavior should be documented there.
                skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                   (uint64_t)pFences[i], __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                                   "Fence %#" PRIxLEAST64 " submitted to VkResetFences in UNSIGNALED STATE", (uint64_t)pFences[i]);
            } else {
                fence_item->second.createInfo.flags =
                    static_cast<VkFenceCreateFlags>(fence_item->second.createInfo.flags & ~VK_FENCE_CREATE_SIGNALED_BIT);
            }
        }
#endif
        if (dev_data->fenceMap[pFences[i]].in_use.load()) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                        reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                        "Fence %#" PRIx64 " is in use by a command buffer.", reinterpret_cast<const uint64_t &>(pFences[i]));
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        result = dev_data->device_dispatch_table->ResetFences(device, fenceCount, pFences);
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    auto fbNode = dev_data->frameBufferMap.find(framebuffer);
    if (fbNode != dev_data->frameBufferMap.end()) {
        for (auto cb : fbNode->second.referencingCmdBuffers) {
            auto cbNode = dev_data->commandBufferMap.find(cb);
            if (cbNode != dev_data->commandBufferMap.end()) {
                // Set CB as invalid and record destroyed framebuffer
                cbNode->second->state = CB_INVALID;
                loader_platform_thread_lock_mutex(&globalLock);
                cbNode->second->destroyedFramebuffers.insert(framebuffer);
                loader_platform_thread_unlock_mutex(&globalLock);
            }
        }
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->frameBufferMap.erase(framebuffer);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    dev_data->device_dispatch_table->DestroyFramebuffer(device, framebuffer, pAllocator);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroyRenderPass(device, renderPass, pAllocator);
    loader_platform_thread_lock_mutex(&globalLock);
    dev_data->renderPassMap.erase(renderPass);
    loader_platform_thread_unlock_mutex(&globalLock);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
                                                              const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->device_dispatch_table->CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);

    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
        add_object_create_info(dev_data, (uint64_t)*pBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, pCreateInfo);
#endif
        // TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid
        dev_data->bufferMap[*pBuffer].create_info = unique_ptr<VkBufferCreateInfo>(new VkBufferCreateInfo(*pCreateInfo));
        dev_data->bufferMap[*pBuffer].in_use.store(0);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
                                                                  const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateBufferView(device, pCreateInfo, pAllocator, pView);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->bufferViewMap[*pView] = VkBufferViewCreateInfo(*pCreateInfo);
#if MTMERGESOURCE
        // In order to create a valid buffer view, the buffer must have been created with at least one of the
        // following flags: UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
        validate_buffer_usage_flags(dev_data, device, pCreateInfo->buffer,
                                    VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, VK_FALSE,
                                    "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
#endif
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
                                                             const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->device_dispatch_table->CreateImage(device, pCreateInfo, pAllocator, pImage);

    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
        add_object_create_info(dev_data, (uint64_t)*pImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, pCreateInfo);
#endif
        IMAGE_LAYOUT_NODE image_node;
        image_node.layout = pCreateInfo->initialLayout;
        image_node.format = pCreateInfo->format;
        dev_data->imageMap[*pImage].createInfo = *pCreateInfo;
        ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()};
        dev_data->imageSubresourceMap[*pImage].push_back(subpair);
        dev_data->imageLayoutMap[subpair] = image_node;
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

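// In-place variant: rewrites range->levelCount/layerCount so the stored range holds
// concrete values. For example, an image created with mipLevels = 10 and a view with
// baseMipLevel = 2 and levelCount = VK_REMAINING_MIP_LEVELS resolves to levelCount = 8.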
static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
    /* expects globalLock to be held by caller */

    auto image_node_it = dev_data->imageMap.find(image);
    if (image_node_it != dev_data->imageMap.end()) {
        /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
         * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
         * the actual values.
         */
        if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
            range->levelCount = image_node_it->second.createInfo.mipLevels - range->baseMipLevel;
        }

        if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
            range->layerCount = image_node_it->second.createInfo.arrayLayers - range->baseArrayLayer;
        }
    }
}

// Return the correct layer/level counts if the caller used the special
// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
                                         VkImage image) {
    /* expects globalLock to be held by caller */

    *levels = range.levelCount;
    *layers = range.layerCount;
    auto image_node_it = dev_data->imageMap.find(image);
    if (image_node_it != dev_data->imageMap.end()) {
        if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
            *levels = image_node_it->second.createInfo.mipLevels - range.baseMipLevel;
        }
        if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
            *layers = image_node_it->second.createInfo.arrayLayers - range.baseArrayLayer;
        }
    }
}

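// vkCreateImageView shadows the create info with VK_REMAINING_* already resolved,
// so later layout and usage validation can work with concrete level/layer counts.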
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
                                                                 const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateImageView(device, pCreateInfo, pAllocator, pView);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        VkImageViewCreateInfo localCI = VkImageViewCreateInfo(*pCreateInfo);
        ResolveRemainingLevelsLayers(dev_data, &localCI.subresourceRange, pCreateInfo->image);
        dev_data->imageViewMap[*pView] = localCI;
#if MTMERGESOURCE
        // Validate that img has correct usage flags set
        validate_image_usage_flags(dev_data, device, pCreateInfo->image,
                                   VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT |
                                       VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
                                   VK_FALSE, "vkCreateImageView()", "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT]_BIT");
#endif
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateFence(device, pCreateInfo, pAllocator, pFence);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        FENCE_NODE *pFN = &dev_data->fenceMap[*pFence];
#if MTMERGESOURCE
        memset(pFN, 0, sizeof(MT_FENCE_INFO));
        memcpy(&(pFN->createInfo), pCreateInfo, sizeof(VkFenceCreateInfo));
        if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
            pFN->firstTimeFlag = VK_TRUE;
        }
#endif
        pFN->in_use.store(0);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

// TODO handle pipeline caches
VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
                                                     const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
    return result;
}

VKAPI_ATTR void VKAPI_CALL
vkDestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroyPipelineCache(device, pipelineCache, pAllocator);
}

VKAPI_ATTR VkResult VKAPI_CALL
vkGetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
vkMergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                          const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                          VkPipeline *pPipelines) {
    VkResult result = VK_SUCCESS;
    // TODO What to do with pipelineCache?
    // The order of operations here is a little convoluted but gets the job done
    //  1. Pipeline create state is first shadowed into PIPELINE_NODE struct
    //  2. Create state is then validated (which uses flags setup during shadowing)
    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
    VkBool32 skipCall = VK_FALSE;
    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
    vector<PIPELINE_NODE *> pPipeNode(count);
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    uint32_t i = 0;
    loader_platform_thread_lock_mutex(&globalLock);

    for (i = 0; i < count; i++) {
        pPipeNode[i] = initGraphicsPipeline(dev_data, &pCreateInfos[i]);
        skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode, i);
    }

    if (VK_FALSE == skipCall) {
        loader_platform_thread_unlock_mutex(&globalLock);
        result = dev_data->device_dispatch_table->CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
                                                                          pPipelines);
        loader_platform_thread_lock_mutex(&globalLock);
        for (i = 0; i < count; i++) {
            pPipeNode[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    } else {
        for (i = 0; i < count; i++) {
            delete pPipeNode[i];
        }
        loader_platform_thread_unlock_mutex(&globalLock);
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                         const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                         VkPipeline *pPipelines) {
    VkResult result = VK_SUCCESS;
    VkBool32 skipCall = VK_FALSE;

    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
    vector<PIPELINE_NODE *> pPipeNode(count);
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    uint32_t i = 0;
    loader_platform_thread_lock_mutex(&globalLock);
    for (i = 0; i < count; i++) {
        // TODO: Verify compute stage bits

        // Create and initialize internal tracking data structure
        pPipeNode[i] = new PIPELINE_NODE;
        memcpy(&pPipeNode[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo));

        // TODO: Add Compute Pipeline Verification
        // skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode[i]);
    }

    if (VK_FALSE == skipCall) {
        loader_platform_thread_unlock_mutex(&globalLock);
        result = dev_data->device_dispatch_table->CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
                                                                         pPipelines);
        loader_platform_thread_lock_mutex(&globalLock);
        for (i = 0; i < count; i++) {
            pPipeNode[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    } else {
        for (i = 0; i < count; i++) {
            // Clean up any locally allocated data structures
            delete pPipeNode[i];
        }
        loader_platform_thread_unlock_mutex(&globalLock);
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
                                                               const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateSampler(device, pCreateInfo, pAllocator, pSampler);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->sampleMap[*pSampler] = unique_ptr<SAMPLER_NODE>(new SAMPLER_NODE(pSampler, pCreateInfo));
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
                            const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
    if (VK_SUCCESS == result) {
        // TODOSC : Capture layout bindings set
        LAYOUT_NODE *pNewNode = new LAYOUT_NODE;
        if (NULL == pNewNode) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
                        (uint64_t)*pSetLayout, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
                        "Out of memory while attempting to allocate LAYOUT_NODE in vkCreateDescriptorSetLayout()"))
                return VK_ERROR_VALIDATION_FAILED_EXT;
        }
        memcpy((void *)&pNewNode->createInfo, pCreateInfo, sizeof(VkDescriptorSetLayoutCreateInfo));
        pNewNode->createInfo.pBindings = new VkDescriptorSetLayoutBinding[pCreateInfo->bindingCount];
        memcpy((void *)pNewNode->createInfo.pBindings, pCreateInfo->pBindings,
               sizeof(VkDescriptorSetLayoutBinding) * pCreateInfo->bindingCount);
        // g++ does not like reserve with size 0
        if (pCreateInfo->bindingCount)
            pNewNode->bindingToIndexMap.reserve(pCreateInfo->bindingCount);
        uint32_t totalCount = 0;
        for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
            if (!pNewNode->bindingToIndexMap.emplace(pCreateInfo->pBindings[i].binding, i).second) {
                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)*pSetLayout, __LINE__,
                            DRAWSTATE_INVALID_LAYOUT, "DS", "duplicated binding number in "
                                                            "VkDescriptorSetLayoutBinding"))
                    return VK_ERROR_VALIDATION_FAILED_EXT;
            }
            totalCount += pCreateInfo->pBindings[i].descriptorCount;
            if (pCreateInfo->pBindings[i].pImmutableSamplers) {
                VkSampler **ppIS = (VkSampler **)&pNewNode->createInfo.pBindings[i].pImmutableSamplers;
                *ppIS = new VkSampler[pCreateInfo->pBindings[i].descriptorCount];
                memcpy(*ppIS, pCreateInfo->pBindings[i].pImmutableSamplers,
                       pCreateInfo->pBindings[i].descriptorCount * sizeof(VkSampler));
            }
        }
        pNewNode->layout = *pSetLayout;
        pNewNode->startIndex = 0;
        if (totalCount > 0) {
            pNewNode->descriptorTypes.resize(totalCount);
            pNewNode->stageFlags.resize(totalCount);
            uint32_t offset = 0;
            uint32_t j = 0;
            VkDescriptorType dType;
            for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
                dType = pCreateInfo->pBindings[i].descriptorType;
                for (j = 0; j < pCreateInfo->pBindings[i].descriptorCount; j++) {
                    pNewNode->descriptorTypes[offset + j] = dType;
                    pNewNode->stageFlags[offset + j] = pCreateInfo->pBindings[i].stageFlags;
                    if ((dType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
                        (dType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
                        pNewNode->dynamicDescriptorCount++;
                    }
                }
                offset += j;
            }
            pNewNode->endIndex = pNewNode->startIndex + totalCount - 1;
        } else { // no descriptors
            pNewNode->endIndex = 0;
        }
        // Put new node at Head of global Layer list
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->descriptorSetLayoutMap[*pSetLayout] = pNewNode;
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

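// Checks a single push-constant range against the device limit. For example, with
// maxPushConstantsSize = 128, a range with offset = 96 and size = 64 (ending at
// byte 160) is rejected.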
static bool validatePushConstantSize(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
                                     const char *caller_name) {
    bool skipCall = false;
    if ((offset + size) > dev_data->physDevProperties.properties.limits.maxPushConstantsSize) {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                           DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that "
                                                                 "exceeds this device's maxPushConstantsSize of %u.",
                           caller_name, offset, size, dev_data->physDevProperties.properties.limits.maxPushConstantsSize);
    }
    return skipCall;
}

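// vkCreatePipelineLayout validates each push-constant range twice: against the size
// limit above, and for a non-zero, 4-byte-aligned size. For example, size = 16
// passes, while size = 0 or size = 6 fails the (size & 0x3) test.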
VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
                                                      const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    uint32_t i = 0;
    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        skipCall |= validatePushConstantSize(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
                                             pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()");
        if ((pCreateInfo->pPushConstantRanges[i].size == 0) || ((pCreateInfo->pPushConstantRanges[i].size & 0x3) != 0)) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constant index %u with "
                                                              "size %u. Size must be greater than zero and a multiple of 4.",
                        i, pCreateInfo->pPushConstantRanges[i].size);
        }
        // TODO : Add warning if ranges overlap
    }
    VkResult result = dev_data->device_dispatch_table->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        // TODOSC : Merge capture of the setLayouts per pipeline
        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
        plNode.descriptorSetLayouts.resize(pCreateInfo->setLayoutCount);
        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
            plNode.descriptorSetLayouts[i] = pCreateInfo->pSetLayouts[i];
        }
        plNode.pushConstantRanges.resize(pCreateInfo->pushConstantRangeCount);
        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
            plNode.pushConstantRanges[i] = pCreateInfo->pPushConstantRanges[i];
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                       VkDescriptorPool *pDescriptorPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
    if (VK_SUCCESS == result) {
        // Insert this pool into Global Pool LL at head
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Created Descriptor Pool %#" PRIxLEAST64,
                    (uint64_t)*pDescriptorPool))
            return VK_ERROR_VALIDATION_FAILED_EXT;
        DESCRIPTOR_POOL_NODE *pNewNode = new DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo);
        if (NULL == pNewNode) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()"))
                return VK_ERROR_VALIDATION_FAILED_EXT;
        } else {
            loader_platform_thread_lock_mutex(&globalLock);
            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
            loader_platform_thread_unlock_mutex(&globalLock);
        }
    } else {
        // Need to do anything if pool create fails?
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->ResetDescriptorPool(device, descriptorPool, flags);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        clearDescriptorPool(dev_data, device, descriptorPool, flags);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

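// vkAllocateDescriptorSets is a two-phase call: first confirm the pool exists and
// has enough descriptors of each type for the requested layouts, then, after the
// driver allocates, shadow each set in a SET_NODE linked at the head of the pool's
// set list.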
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    loader_platform_thread_lock_mutex(&globalLock);
    // Verify that requested descriptorSets are available in pool
    DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
    if (!pPoolNode) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                            (uint64_t)pAllocateInfo->descriptorPool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
                            "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
                            (uint64_t)pAllocateInfo->descriptorPool);
    } else { // Make sure pool has all the available descriptors before calling down chain
        skipCall |= validate_descriptor_availability_in_pool(dev_data, pPoolNode, pAllocateInfo->descriptorSetCount,
                                                             pAllocateInfo->pSetLayouts);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
        if (pPoolNode) {
            if (pAllocateInfo->descriptorSetCount == 0) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        pAllocateInfo->descriptorSetCount, __LINE__, DRAWSTATE_NONE, "DS",
                        "AllocateDescriptorSets called with 0 count");
            }
            for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        (uint64_t)pDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS", "Created Descriptor Set %#" PRIxLEAST64,
                        (uint64_t)pDescriptorSets[i]);
                // Create new set node and add to head of pool nodes
                SET_NODE *pNewNode = new SET_NODE;
                if (NULL == pNewNode) {
                    if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                DRAWSTATE_OUT_OF_MEMORY, "DS",
                                "Out of memory while attempting to allocate SET_NODE in vkAllocateDescriptorSets()"))
                        return VK_ERROR_VALIDATION_FAILED_EXT;
                } else {
                    // TODO : Pool should store a total count of each type of Descriptor available
                    //  When descriptors are allocated, decrement the count and validate here
                    //  that the count doesn't go below 0. On reset/free, bump the count back up.
                    // Insert set at head of Set LL for this pool
                    pNewNode->pNext = pPoolNode->pSets;
                    pNewNode->in_use.store(0);
                    pPoolNode->pSets = pNewNode;
                    LAYOUT_NODE *pLayout = getLayoutNode(dev_data, pAllocateInfo->pSetLayouts[i]);
                    if (NULL == pLayout) {
                        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)pAllocateInfo->pSetLayouts[i],
                                    __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                                    "Unable to find set layout node for layout %#" PRIxLEAST64
                                    " specified in vkAllocateDescriptorSets() call",
                                    (uint64_t)pAllocateInfo->pSetLayouts[i]))
                            return VK_ERROR_VALIDATION_FAILED_EXT;
                    }
                    pNewNode->pLayout = pLayout;
                    pNewNode->pool = pAllocateInfo->descriptorPool;
                    pNewNode->set = pDescriptorSets[i];
                    pNewNode->descriptorCount = (pLayout->createInfo.bindingCount != 0) ? pLayout->endIndex + 1 : 0;
                    if (pNewNode->descriptorCount) {
                        size_t descriptorArraySize = sizeof(GENERIC_HEADER *) * pNewNode->descriptorCount;
                        pNewNode->ppDescriptors = new GENERIC_HEADER *[descriptorArraySize];
                        memset(pNewNode->ppDescriptors, 0, descriptorArraySize);
                    }
                    dev_data->setMap[pDescriptorSets[i]] = pNewNode;
                }
            }
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

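// vkFreeDescriptorSets rejects frees of sets that are still in-flight and frees from
// pools created without VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; on success
// it returns the freed descriptors to the pool's available counts.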
6801 VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkFreeDescriptorSets(VkDevice device,VkDescriptorPool descriptorPool,uint32_t count,const VkDescriptorSet * pDescriptorSets)6802 vkFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
6803 VkBool32 skipCall = VK_FALSE;
6804 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6805 // Make sure that no sets being destroyed are in-flight
6806 loader_platform_thread_lock_mutex(&globalLock);
6807 for (uint32_t i = 0; i < count; ++i)
6808 skipCall |= validateIdleDescriptorSet(dev_data, pDescriptorSets[i], "vkFreeDesriptorSets");
6809 DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, descriptorPool);
6810 if (pPoolNode && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pPoolNode->createInfo.flags)) {
6811 // Can't Free from a NON_FREE pool
6812 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
6813 (uint64_t)device, __LINE__, DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, "DS",
6814 "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
6815 "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
6816 }
6817 loader_platform_thread_unlock_mutex(&globalLock);
6818 if (VK_FALSE != skipCall)
6819 return VK_ERROR_VALIDATION_FAILED_EXT;
6820 VkResult result = dev_data->device_dispatch_table->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
6821 if (VK_SUCCESS == result) {
6822 loader_platform_thread_lock_mutex(&globalLock);
6823
6824         // Update available descriptor sets in pool (guard: the pool may not be tracked by this layer)
6825         if (pPoolNode) pPoolNode->availableSets += count;
6826
6827 // For each freed descriptor add it back into the pool as available
6828 for (uint32_t i = 0; i < count; ++i) {
6829 SET_NODE *pSet = dev_data->setMap[pDescriptorSets[i]]; // getSetNode() without locking
6830 invalidateBoundCmdBuffers(dev_data, pSet);
6831 LAYOUT_NODE *pLayout = pSet->pLayout;
6832 uint32_t typeIndex = 0, poolSizeCount = 0;
6833 for (uint32_t j = 0; j < pLayout->createInfo.bindingCount; ++j) {
6834 typeIndex = static_cast<uint32_t>(pLayout->createInfo.pBindings[j].descriptorType);
6835 poolSizeCount = pLayout->createInfo.pBindings[j].descriptorCount;
6836 if (pPoolNode) pPoolNode->availableDescriptorTypeCount[typeIndex] += poolSizeCount;
6837 }
6838 }
6839 loader_platform_thread_unlock_mutex(&globalLock);
6840 }
6841 // TODO : Any other clean-up or book-keeping to do here?
6842 return result;
6843 }
6844
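// Record storage image/texel-buffer/storage-buffer bindings for later memory validation,
// then run dsUpdate(); the driver is only called when dsUpdate() reports no bailout error.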
6845 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6846 vkUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
6847 uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
6848 // dsUpdate will return VK_TRUE only if a bailout error occurs, so we want to call down tree when update returns VK_FALSE
6849 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6850 loader_platform_thread_lock_mutex(&globalLock);
6851 #if MTMERGESOURCE
6852 // MTMTODO : Merge this in with existing update code below and handle descriptor copies case
6853 uint32_t j = 0;
6854 for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
6855 if (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
6856 for (j = 0; j < pDescriptorWrites[i].descriptorCount; ++j) {
6857 dev_data->descriptorSetMap[pDescriptorWrites[i].dstSet].images.push_back(
6858 pDescriptorWrites[i].pImageInfo[j].imageView);
6859 }
6860 } else if (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) {
6861 for (j = 0; j < pDescriptorWrites[i].descriptorCount; ++j) {
6862 dev_data->descriptorSetMap[pDescriptorWrites[i].dstSet].buffers.push_back(
6863 dev_data->bufferViewMap[pDescriptorWrites[i].pTexelBufferView[j]].buffer);
6864 }
6865 } else if (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
6866 pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
6867 for (j = 0; j < pDescriptorWrites[i].descriptorCount; ++j) {
6868 dev_data->descriptorSetMap[pDescriptorWrites[i].dstSet].buffers.push_back(
6869 pDescriptorWrites[i].pBufferInfo[j].buffer);
6870 }
6871 }
6872 }
6873 #endif
6874 VkBool32 rtn = dsUpdate(dev_data, device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
6875 loader_platform_thread_unlock_mutex(&globalLock);
6876 if (!rtn) {
6877 dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6878 pDescriptorCopies);
6879 }
6880 }
6881
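// On successful allocation, attach each new command buffer to its command pool's tracking
// list and create a GLOBAL_CB_NODE to shadow its state.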
6882 VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6883 vkAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
6884 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6885 VkResult result = dev_data->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
6886 if (VK_SUCCESS == result) {
6887 loader_platform_thread_lock_mutex(&globalLock);
6888 auto const &cp_it = dev_data->commandPoolMap.find(pCreateInfo->commandPool);
6889 if (cp_it != dev_data->commandPoolMap.end()) {
6890 for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
6891 // Add command buffer to its commandPool map
6892 cp_it->second.commandBuffers.push_back(pCommandBuffer[i]);
6893 GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
6894 // Add command buffer to map
6895 dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
6896 resetCB(dev_data, pCommandBuffer[i]);
6897 pCB->createInfo = *pCreateInfo;
6898 pCB->device = device;
6899 }
6900 }
6901 #if MTMERGESOURCE
6902 printCBList(dev_data, device);
6903 #endif
6904 loader_platform_thread_unlock_mutex(&globalLock);
6905 }
6906 return result;
6907 }
6908
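// Begin-time validation: the CB must not still be in-flight, secondary CBs need valid
// inheritance info (render pass, framebuffer, subpass, query flags), and implicitly
// resetting a RECORDED CB requires a pool created with the RESET_COMMAND_BUFFER bit.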
6909 VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6910 vkBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
6911 VkBool32 skipCall = VK_FALSE;
6912 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6913 loader_platform_thread_lock_mutex(&globalLock);
6914 // Validate command buffer level
6915 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6916 if (pCB) {
6917 #if MTMERGESOURCE
6918 bool commandBufferComplete = false;
6919 // MTMTODO : Merge this with code below
6920 // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
6921 skipCall = checkCBCompleted(dev_data, commandBuffer, &commandBufferComplete);
6922
6923 if (!commandBufferComplete) {
6924 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6925 (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
6926 "Calling vkBeginCommandBuffer() on active CB %p before it has completed. "
6927 "You must check CB flag before this call.",
6928 commandBuffer);
6929 }
6930 #endif
6931 if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
6932 // Secondary Command Buffer
6933 const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
6934 if (!pInfo) {
6935 skipCall |=
6936 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6937 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6938 "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must have inheritance info.",
6939 reinterpret_cast<void *>(commandBuffer));
6940 } else {
6941 if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
6942 if (!pInfo->renderPass) { // renderpass should NOT be null for a Secondary CB
6943 skipCall |= log_msg(
6944 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6945 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6946 "vkBeginCommandBuffer(): Secondary Command Buffers (%p) must specify a valid renderpass parameter.",
6947 reinterpret_cast<void *>(commandBuffer));
6948 }
6949 if (!pInfo->framebuffer) { // framebuffer may be null for a Secondary CB, but this affects perf
6950 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
6951 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6952 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE,
6953 "DS", "vkBeginCommandBuffer(): Secondary Command Buffers (%p) may perform better if a "
6954 "valid framebuffer parameter is specified.",
6955 reinterpret_cast<void *>(commandBuffer));
6956 } else {
6957 string errorString = "";
6958 auto fbNode = dev_data->frameBufferMap.find(pInfo->framebuffer);
6959 if (fbNode != dev_data->frameBufferMap.end()) {
6960 VkRenderPass fbRP = fbNode->second.createInfo.renderPass;
6961 if (!verify_renderpass_compatibility(dev_data, fbRP, pInfo->renderPass, errorString)) {
6962 // renderPass that framebuffer was created with must be compatible with local renderPass
6965 skipCall |=
6966 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6967 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6968 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
6969 "DS", "vkBeginCommandBuffer(): Secondary Command "
6970 "Buffer (%p) renderPass (%#" PRIxLEAST64 ") is incompatible w/ framebuffer "
6971 "(%#" PRIxLEAST64 ") w/ render pass (%#" PRIxLEAST64 ") due to: %s",
6972 reinterpret_cast<void *>(commandBuffer), (uint64_t)(pInfo->renderPass),
6973 (uint64_t)(pInfo->framebuffer), (uint64_t)(fbRP), errorString.c_str());
6974 }
6975 // Connect this framebuffer to this cmdBuffer
6976 fbNode->second.referencingCmdBuffers.insert(pCB->commandBuffer);
6977 }
6978 }
6979 }
6980 if ((pInfo->occlusionQueryEnable == VK_FALSE ||
6981 dev_data->physDevProperties.features.occlusionQueryPrecise == VK_FALSE) &&
6982 (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
6983 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6984 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
6985 __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6986 "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must not have "
6987 "VK_QUERY_CONTROL_PRECISE_BIT if occulusionQuery is disabled or the device does not "
6988 "support precise occlusion queries.",
6989 reinterpret_cast<void *>(commandBuffer));
6990 }
6991 }
6992 if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
6993 auto rp_data = dev_data->renderPassMap.find(pInfo->renderPass);
6994 if (rp_data != dev_data->renderPassMap.end() && rp_data->second && rp_data->second->pCreateInfo) {
6995 if (pInfo->subpass >= rp_data->second->pCreateInfo->subpassCount) {
6996 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6997 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
6998 DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6999 "vkBeginCommandBuffer(): Secondary Command Buffers (%p) must has a subpass index (%d) "
7000 "that is less than the number of subpasses (%d).",
7001 (void *)commandBuffer, pInfo->subpass, rp_data->second->pCreateInfo->subpassCount);
7002 }
7003 }
7004 }
7005 }
7006 if (CB_RECORDING == pCB->state) {
7007 skipCall |=
7008 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7009 (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7010 "vkBeginCommandBuffer(): Cannot call Begin on CB (%#" PRIxLEAST64
7011 ") in the RECORDING state. Must first call vkEndCommandBuffer().",
7012 (uint64_t)commandBuffer);
7013 } else if (CB_RECORDED == pCB->state) {
7014 VkCommandPool cmdPool = pCB->createInfo.commandPool;
7015 if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
7016 skipCall |=
7017 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7018 (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
7019 "Call to vkBeginCommandBuffer() on command buffer (%#" PRIxLEAST64
7020 ") attempts to implicitly reset cmdBuffer created from command pool (%#" PRIxLEAST64
7021 ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
7022 (uint64_t)commandBuffer, (uint64_t)cmdPool);
7023 }
7024 resetCB(dev_data, commandBuffer);
7025 }
7026 // Set updated state here in case implicit reset occurs above
7027 pCB->state = CB_RECORDING;
7028 pCB->beginInfo = *pBeginInfo;
7029 if (pCB->beginInfo.pInheritanceInfo) {
7030 pCB->inheritanceInfo = *(pCB->beginInfo.pInheritanceInfo);
7031 pCB->beginInfo.pInheritanceInfo = &pCB->inheritanceInfo;
7032 }
7033 } else {
7034 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7035 (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
7036 "In vkBeginCommandBuffer() and unable to find CommandBuffer Node for CB %p!", (void *)commandBuffer);
7037 }
7038 loader_platform_thread_unlock_mutex(&globalLock);
7039 if (VK_FALSE != skipCall) {
7040 return VK_ERROR_VALIDATION_FAILED_EXT;
7041 }
7042 VkResult result = dev_data->device_dispatch_table->BeginCommandBuffer(commandBuffer, pBeginInfo);
7043 #if MTMERGESOURCE
7044 loader_platform_thread_lock_mutex(&globalLock);
7045 clear_cmd_buf_and_mem_references(dev_data, commandBuffer);
7046 loader_platform_thread_unlock_mutex(&globalLock);
7047 #endif
7048 return result;
7049 }
7050
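// End-time validation: recording must have begun and no queries may still be active;
// on success the CB transitions to CB_RECORDED and its status flags are cleared.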
7051 VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(VkCommandBuffer commandBuffer) {
7052 VkBool32 skipCall = VK_FALSE;
7053 VkResult result = VK_SUCCESS;
7054 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7055 loader_platform_thread_lock_mutex(&globalLock);
7056 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7057 if (pCB) {
7058 if (pCB->state != CB_RECORDING) {
7059 skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkEndCommandBuffer()");
7060 }
7061 for (auto query : pCB->activeQueries) {
7062 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7063 DRAWSTATE_INVALID_QUERY, "DS",
7064 "Ending command buffer with in progress query: queryPool %" PRIu64 ", index %d",
7065 (uint64_t)(query.pool), query.index);
7066 }
7067 }
7068 if (VK_FALSE == skipCall) {
7069 loader_platform_thread_unlock_mutex(&globalLock);
7070 result = dev_data->device_dispatch_table->EndCommandBuffer(commandBuffer);
7071 loader_platform_thread_lock_mutex(&globalLock);
7072 if (VK_SUCCESS == result) {
7073 pCB->state = CB_RECORDED;
7074 // Reset CB status flags
7075 pCB->status = 0;
7076 printCB(dev_data, commandBuffer);
7077 }
7078 } else {
7079 result = VK_ERROR_VALIDATION_FAILED_EXT;
7080 }
7081 loader_platform_thread_unlock_mutex(&globalLock);
7082 return result;
7083 }
7084
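// A CB may only be reset if it is not in-flight and its pool was created with
// VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT.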
7085 VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
7086 vkResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
7087 VkBool32 skipCall = VK_FALSE;
7088 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7089 loader_platform_thread_lock_mutex(&globalLock);
7090 #if MTMERGESOURCE
7091 bool commandBufferComplete = false;
7092 // Verify that CB is complete (not in-flight)
7093 skipCall = checkCBCompleted(dev_data, commandBuffer, &commandBufferComplete);
7094 if (!commandBufferComplete) {
7095 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7096 (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
7097 "Resetting CB %p before it has completed. You must check CB "
7098 "flag before calling vkResetCommandBuffer().",
7099 commandBuffer);
7100 }
7101 // Clear memory references at this point.
7102 clear_cmd_buf_and_mem_references(dev_data, commandBuffer);
7103 #endif
7104 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) { // guard against a CB this layer never tracked
7105 VkCommandPool cmdPool = pCB->createInfo.commandPool;
7106 if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
7107 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7108 (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
7109 "Attempt to reset command buffer (%#" PRIxLEAST64 ") created from command pool (%#" PRIxLEAST64
7110 ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
7111 (uint64_t)commandBuffer, (uint64_t)cmdPool);
7112 }
}
7113 if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
7114 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7115 (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
7116 "Attempt to reset command buffer (%#" PRIxLEAST64 ") which is in use.",
7117 reinterpret_cast<uint64_t>(commandBuffer));
7118 }
7119 loader_platform_thread_unlock_mutex(&globalLock);
7120 if (skipCall != VK_FALSE)
7121 return VK_ERROR_VALIDATION_FAILED_EXT;
7122 VkResult result = dev_data->device_dispatch_table->ResetCommandBuffer(commandBuffer, flags);
7123 if (VK_SUCCESS == result) {
7124 loader_platform_thread_lock_mutex(&globalLock);
7125 resetCB(dev_data, commandBuffer);
7126 loader_platform_thread_unlock_mutex(&globalLock);
7127 }
7128 return result;
7129 }
7130 #if MTMERGESOURCE
7131 // TODO : For any vkCmdBind* calls that include an object which has mem bound to it,
7132 // need to account for that mem now having binding to given commandBuffer
7133 #endif
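// Record the pipeline binding on the CB; binding a compute pipeline inside an active
// render pass, or binding a pipeline this layer has never seen, is flagged as an error.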
7134 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7135 vkCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
7136 VkBool32 skipCall = VK_FALSE;
7137 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7138 loader_platform_thread_lock_mutex(&globalLock);
7139 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7140 if (pCB) {
7141 skipCall |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
7142 if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
7143 skipCall |=
7144 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7145 (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
7146 "Incorrectly binding compute pipeline (%#" PRIxLEAST64 ") during active RenderPass (%#" PRIxLEAST64 ")",
7147 (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass);
7148 }
7149
7150 PIPELINE_NODE *pPN = getPipeline(dev_data, pipeline);
7151 if (pPN) {
7152 pCB->lastBound[pipelineBindPoint].pipeline = pipeline;
7153 set_cb_pso_status(pCB, pPN);
7154 skipCall |= validatePipelineState(dev_data, pCB, pipelineBindPoint, pipeline);
7155 } else {
7156 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7157 (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
7158 "Attempt to bind Pipeline %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline));
7159 }
7160 }
7161 loader_platform_thread_unlock_mutex(&globalLock);
7162 if (VK_FALSE == skipCall)
7163 dev_data->device_dispatch_table->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
7164 }
7165
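// The vkCmdSet* entry points below share one pattern: record the command on the CB, set
// the matching CBSTATUS_* bit so draw-time validation knows that piece of dynamic state
// was provided, and call down the chain only if no error was flagged.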
7166 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7167 vkCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
7168 VkBool32 skipCall = VK_FALSE;
7169 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7170 loader_platform_thread_lock_mutex(&globalLock);
7171 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7172 if (pCB) {
7173 skipCall |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
7174 pCB->status |= CBSTATUS_VIEWPORT_SET;
7175 pCB->viewports.resize(viewportCount);
7176 memcpy(pCB->viewports.data(), pViewports, viewportCount * sizeof(VkViewport));
7177 }
7178 loader_platform_thread_unlock_mutex(&globalLock);
7179 if (VK_FALSE == skipCall)
7180 dev_data->device_dispatch_table->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
7181 }
7182
7183 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7184 vkCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
7185 VkBool32 skipCall = VK_FALSE;
7186 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7187 loader_platform_thread_lock_mutex(&globalLock);
7188 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7189 if (pCB) {
7190 skipCall |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
7191 pCB->status |= CBSTATUS_SCISSOR_SET;
7192 pCB->scissors.resize(scissorCount);
7193 memcpy(pCB->scissors.data(), pScissors, scissorCount * sizeof(VkRect2D));
7194 }
7195 loader_platform_thread_unlock_mutex(&globalLock);
7196 if (VK_FALSE == skipCall)
7197 dev_data->device_dispatch_table->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
7198 }
7199
7200 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
7201 VkBool32 skipCall = VK_FALSE;
7202 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7203 loader_platform_thread_lock_mutex(&globalLock);
7204 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7205 if (pCB) {
7206 skipCall |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
7207 pCB->status |= CBSTATUS_LINE_WIDTH_SET;
7208 }
7209 loader_platform_thread_unlock_mutex(&globalLock);
7210 if (VK_FALSE == skipCall)
7211 dev_data->device_dispatch_table->CmdSetLineWidth(commandBuffer, lineWidth);
7212 }
7213
7214 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7215 vkCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
7216 VkBool32 skipCall = VK_FALSE;
7217 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7218 loader_platform_thread_lock_mutex(&globalLock);
7219 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7220 if (pCB) {
7221 skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
7222 pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
7223 }
7224 loader_platform_thread_unlock_mutex(&globalLock);
7225 if (VK_FALSE == skipCall)
7226 dev_data->device_dispatch_table->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp,
7227 depthBiasSlopeFactor);
7228 }
7229
7230 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
7231 VkBool32 skipCall = VK_FALSE;
7232 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7233 loader_platform_thread_lock_mutex(&globalLock);
7234 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7235 if (pCB) {
7236 skipCall |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
7237 pCB->status |= CBSTATUS_BLEND_SET;
7238 }
7239 loader_platform_thread_unlock_mutex(&globalLock);
7240 if (VK_FALSE == skipCall)
7241 dev_data->device_dispatch_table->CmdSetBlendConstants(commandBuffer, blendConstants);
7242 }
7243
7244 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7245 vkCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
7246 VkBool32 skipCall = VK_FALSE;
7247 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7248 loader_platform_thread_lock_mutex(&globalLock);
7249 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7250 if (pCB) {
7251 skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
7252 pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
7253 }
7254 loader_platform_thread_unlock_mutex(&globalLock);
7255 if (VK_FALSE == skipCall)
7256 dev_data->device_dispatch_table->CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
7257 }
7258
7259 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7260 vkCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
7261 VkBool32 skipCall = VK_FALSE;
7262 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7263 loader_platform_thread_lock_mutex(&globalLock);
7264 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7265 if (pCB) {
7266 skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
7267 pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
7268 }
7269 loader_platform_thread_unlock_mutex(&globalLock);
7270 if (VK_FALSE == skipCall)
7271 dev_data->device_dispatch_table->CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
7272 }
7273
7274 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7275 vkCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
7276 VkBool32 skipCall = VK_FALSE;
7277 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7278 loader_platform_thread_lock_mutex(&globalLock);
7279 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7280 if (pCB) {
7281 skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
7282 pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
7283 }
7284 loader_platform_thread_unlock_mutex(&globalLock);
7285 if (VK_FALSE == skipCall)
7286 dev_data->device_dispatch_table->CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
7287 }
7288
7289 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7290 vkCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
7291 VkBool32 skipCall = VK_FALSE;
7292 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7293 loader_platform_thread_lock_mutex(&globalLock);
7294 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7295 if (pCB) {
7296 skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
7297 pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
7298 }
7299 loader_platform_thread_unlock_mutex(&globalLock);
7300 if (VK_FALSE == skipCall)
7301 dev_data->device_dispatch_table->CmdSetStencilReference(commandBuffer, faceMask, reference);
7302 }
7303
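// Per bound set this validates that the set exists, was updated, and is layout-compatible
// with the pipeline layout at its slot, and that every dynamic descriptor has a properly
// aligned dynamic offset. For example, binding two sets where only set #1 contains one
// dynamic uniform buffer requires exactly one pDynamicOffsets entry, and that offset must
// be a multiple of minUniformBufferOffsetAlignment.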
7304 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7305 vkCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
7306 uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
7307 const uint32_t *pDynamicOffsets) {
7308 VkBool32 skipCall = VK_FALSE;
7309 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7310 loader_platform_thread_lock_mutex(&globalLock);
7311 #if MTMERGESOURCE
7312 // MTMTODO : Merge this with code below
7313 auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7314 if (cb_data != dev_data->commandBufferMap.end()) {
7315 // MTMTODO : activeDescriptorSets should be merged with lastBound.boundDescriptorSets
7316 std::vector<VkDescriptorSet> &activeDescriptorSets = cb_data->second->activeDescriptorSets;
7317 if (activeDescriptorSets.size() < (setCount + firstSet)) {
7318 activeDescriptorSets.resize(setCount + firstSet);
7319 }
7320 for (uint32_t i = 0; i < setCount; ++i) {
7321 activeDescriptorSets[i + firstSet] = pDescriptorSets[i];
7322 }
7323 }
7324 // TODO : Somewhere need to verify that all textures referenced by shaders in DS are in some type of *SHADER_READ* state
7325 #endif
7326 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7327 if (pCB) {
7328 if (pCB->state == CB_RECORDING) {
7329 // Track total count of dynamic descriptor types to make sure we have an offset for each one
7330 uint32_t totalDynamicDescriptors = 0;
7331 string errorString = "";
7332 uint32_t lastSetIndex = firstSet + setCount - 1;
7333 if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size())
7334 pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7335 VkDescriptorSet oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
7336 for (uint32_t i = 0; i < setCount; i++) {
7337 SET_NODE *pSet = getSetNode(dev_data, pDescriptorSets[i]);
7338 if (pSet) {
7339 pCB->lastBound[pipelineBindPoint].uniqueBoundSets.insert(pDescriptorSets[i]);
7340 pSet->boundCmdBuffers.insert(commandBuffer);
7341 pCB->lastBound[pipelineBindPoint].pipelineLayout = layout;
7342 pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pDescriptorSets[i];
7343 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7344 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7345 DRAWSTATE_NONE, "DS", "DS %#" PRIxLEAST64 " bound on pipeline %s",
7346 (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
7347 if (!pSet->pUpdateStructs && (pSet->descriptorCount != 0)) {
7348 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
7349 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
7350 __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
7351 "DS %#" PRIxLEAST64
7352 " bound but it was never updated. You may want to either update it or not bind it.",
7353 (uint64_t)pDescriptorSets[i]);
7354 }
7355 // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
7356 if (!verify_set_layout_compatibility(dev_data, pSet, layout, i + firstSet, errorString)) {
7357 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7358 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
7359 __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
7360 "descriptorSet #%u being bound is not compatible with overlapping layout in "
7361 "pipelineLayout due to: %s",
7362 i, errorString.c_str());
7363 }
7364 if (pSet->pLayout->dynamicDescriptorCount) {
7365 // First make sure we won't overstep bounds of pDynamicOffsets array
7366 if ((totalDynamicDescriptors + pSet->pLayout->dynamicDescriptorCount) > dynamicOffsetCount) {
7367 skipCall |=
7368 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7369 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7370 DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7371 "descriptorSet #%u (%#" PRIxLEAST64
7372 ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
7373 "array. There must be one dynamic offset for each dynamic descriptor being bound.",
7374 i, (uint64_t)pDescriptorSets[i], pSet->pLayout->dynamicDescriptorCount,
7375 (dynamicOffsetCount - totalDynamicDescriptors));
7376 } else { // Validate and store dynamic offsets with the set
7377 // Validate Dynamic Offset Minimums
7378 uint32_t cur_dyn_offset = totalDynamicDescriptors;
7379 for (uint32_t d = 0; d < pSet->descriptorCount; d++) {
7380 if (pSet->pLayout->descriptorTypes[d] == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
7381 if (vk_safe_modulo(
7382 pDynamicOffsets[cur_dyn_offset],
7383 dev_data->physDevProperties.properties.limits.minUniformBufferOffsetAlignment) !=
7384 0) {
7385 skipCall |= log_msg(
7386 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7387 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7388 DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
7389 "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
7390 "device limit minUniformBufferOffsetAlignment %#" PRIxLEAST64,
7391 cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7392 dev_data->physDevProperties.properties.limits.minUniformBufferOffsetAlignment);
7393 }
7394 cur_dyn_offset++;
7395 } else if (pSet->pLayout->descriptorTypes[d] == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
7396 if (vk_safe_modulo(
7397 pDynamicOffsets[cur_dyn_offset],
7398 dev_data->physDevProperties.properties.limits.minStorageBufferOffsetAlignment) !=
7399 0) {
7400 skipCall |= log_msg(
7401 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7402 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7403 DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
7404 "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
7405 "device limit minStorageBufferOffsetAlignment %#" PRIxLEAST64,
7406 cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7407 dev_data->physDevProperties.properties.limits.minStorageBufferOffsetAlignment);
7408 }
7409 cur_dyn_offset++;
7410 }
7411 }
7412 // Keep running total of dynamic descriptor count to verify at the end
7413 totalDynamicDescriptors += pSet->pLayout->dynamicDescriptorCount;
7414 }
7415 }
7416 } else {
7417 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7418 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7419 DRAWSTATE_INVALID_SET, "DS", "Attempt to bind DS %#" PRIxLEAST64 " that doesn't exist!",
7420 (uint64_t)pDescriptorSets[i]);
7421 }
} // end per-set loop; the remaining checks apply to the bind call as a whole
7422 skipCall |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
7423 // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
7424 if (firstSet > 0) { // Check set #s below the first bound set
7425 for (uint32_t i = 0; i < firstSet; ++i) {
7426 if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
7427 !verify_set_layout_compatibility(
7428 dev_data, dev_data->setMap[pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i]], layout, i,
7429 errorString)) {
7430 skipCall |= log_msg(
7431 dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7432 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
7433 (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
7434 "DescriptorSetDS %#" PRIxLEAST64
7435 " previously bound as set #%u was disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")",
7436 (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
7437 pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
7438 }
7439 }
7440 }
7441 // Check if newly last bound set invalidates any remaining bound sets
7442 if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
7443 if (oldFinalBoundSet &&
7444 !verify_set_layout_compatibility(dev_data, dev_data->setMap[oldFinalBoundSet], layout, lastSetIndex,
7445 errorString)) {
7446 skipCall |=
7447 log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7448 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)oldFinalBoundSet, __LINE__,
7449 DRAWSTATE_NONE, "DS", "DescriptorSetDS %#" PRIxLEAST64
7450 " previously bound as set #%u is incompatible with set %#" PRIxLEAST64
7451 " newly bound as set #%u so set #%u and any subsequent sets were "
7452 "disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")",
7453 (uint64_t)oldFinalBoundSet, lastSetIndex,
7454 (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
7455 lastSetIndex + 1, (uint64_t)layout);
7456 pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7457 }
7458 }
7459 // dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
7460 if (totalDynamicDescriptors != dynamicOffsetCount) {
7461 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7462 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
7463 DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7464 "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
7465 "is %u. It should exactly match the number of dynamic descriptors.",
7466 setCount, totalDynamicDescriptors, dynamicOffsetCount);
7467 }
7468 // Save dynamicOffsets bound to this CB, both per bind point and on the CB node itself
7469 for (uint32_t i = 0; i < dynamicOffsetCount; i++) {
7470 pCB->lastBound[pipelineBindPoint].dynamicOffsets.push_back(pDynamicOffsets[i]);
pCB->dynamicOffsets.emplace_back(pDynamicOffsets[i]);
7471 }
7486 } else {
7487 skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
7488 }
7489 }
7490 loader_platform_thread_unlock_mutex(&globalLock);
7491 if (VK_FALSE == skipCall)
7492 dev_data->device_dispatch_table->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
7493 pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
7494 }
7495
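// The index buffer offset must be aligned to the index type size, e.g. an offset of 6 is
// legal for VK_INDEX_TYPE_UINT16 (2-byte indices) but not for VK_INDEX_TYPE_UINT32.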
7496 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7497 vkCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
7498 VkBool32 skipCall = VK_FALSE;
7499 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7500 loader_platform_thread_lock_mutex(&globalLock);
7501 #if MTMERGESOURCE
7502 VkDeviceMemory mem;
7503 skipCall =
7504 get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7505 auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7506 if (cb_data != dev_data->commandBufferMap.end()) {
7507 std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindIndexBuffer()"); };
7508 cb_data->second->validate_functions.push_back(function);
7509 }
7510 // TODO : Somewhere need to verify that IBs have correct usage state flagged
7511 #endif
7512 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7513 if (pCB) {
7514 skipCall |= addCmd(dev_data, pCB, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
7515 VkDeviceSize offset_align = 0;
7516 switch (indexType) {
7517 case VK_INDEX_TYPE_UINT16:
7518 offset_align = 2;
7519 break;
7520 case VK_INDEX_TYPE_UINT32:
7521 offset_align = 4;
7522 break;
7523 default:
7524 // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
7525 break;
7526 }
7527 if (!offset_align || (offset % offset_align)) {
7528 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7529 DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
7530 "vkCmdBindIndexBuffer() offset (%#" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
7531 offset, string_VkIndexType(indexType));
7532 }
7533 pCB->status |= CBSTATUS_INDEX_BUFFER_BOUND;
7534 }
7535 loader_platform_thread_unlock_mutex(&globalLock);
7536 if (VK_FALSE == skipCall)
7537 dev_data->device_dispatch_table->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
7538 }
7539
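// Vertex-buffer tracking helpers: updateResourceTracking() records the bindings into the
// CB's currentDrawData, and updateResourceTrackingOnDraw() snapshots that state onto the
// per-draw list so each draw knows which buffers it consumed.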
7540 void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
7541 uint32_t end = firstBinding + bindingCount;
7542 if (pCB->currentDrawData.buffers.size() < end) {
7543 pCB->currentDrawData.buffers.resize(end);
7544 }
7545 for (uint32_t i = 0; i < bindingCount; ++i) {
7546 pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
7547 }
7548 }
7549
7550 void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
7551
7552 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
7553 uint32_t bindingCount, const VkBuffer *pBuffers,
7554 const VkDeviceSize *pOffsets) {
7555 VkBool32 skipCall = VK_FALSE;
7556 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7557 loader_platform_thread_lock_mutex(&globalLock);
7558 #if MTMERGESOURCE
7559 for (uint32_t i = 0; i < bindingCount; ++i) {
7560 VkDeviceMemory mem;
7561 skipCall |= get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)(pBuffers[i]),
7562 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7563 auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7564 if (cb_data != dev_data->commandBufferMap.end()) {
7565 std::function<VkBool32()> function =
7566 [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindVertexBuffers()"); };
7567 cb_data->second->validate_functions.push_back(function);
7568 }
7569 }
7570 // TODO : Somewhere need to verify that VBs have correct usage state flagged
7571 #endif
7572 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7573 if (pCB) {
7574 skipCall |= addCmd(dev_data, pCB, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
7575 updateResourceTracking(pCB, firstBinding, bindingCount, pBuffers);
7576 } else {
7577 skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
7578 }
7579 loader_platform_thread_unlock_mutex(&globalLock);
7580 if (VK_FALSE == skipCall)
7581 dev_data->device_dispatch_table->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
7582 }
7583
7584 #if MTMERGESOURCE
7585 /* expects globalLock to be held by caller */
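// For every storage image/buffer referenced by the CB's active descriptor sets, queue a
// deferred callback that marks the backing memory as valid (written) when the CB executes.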
7586 bool markStoreImagesAndBuffersAsWritten(VkCommandBuffer commandBuffer) {
7587 bool skip_call = false;
7588 layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7589 auto cb_data = my_data->commandBufferMap.find(commandBuffer);
7590 if (cb_data == my_data->commandBufferMap.end())
7591 return skip_call;
7592 std::vector<VkDescriptorSet> &activeDescriptorSets = cb_data->second->activeDescriptorSets;
7593 for (auto descriptorSet : activeDescriptorSets) {
7594 auto ds_data = my_data->descriptorSetMap.find(descriptorSet);
7595 if (ds_data == my_data->descriptorSetMap.end())
7596 continue;
7597 std::vector<VkImageView> images = ds_data->second.images;
7598 std::vector<VkBuffer> buffers = ds_data->second.buffers;
7599 for (auto imageView : images) {
7600 auto iv_data = my_data->imageViewMap.find(imageView);
7601 if (iv_data == my_data->imageViewMap.end())
7602 continue;
7603 VkImage image = iv_data->second.image;
7604 VkDeviceMemory mem;
7605 skip_call |=
7606 get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7607 std::function<VkBool32()> function = [=]() {
7608 set_memory_valid(my_data, mem, true, image);
7609 return VK_FALSE;
7610 };
7611 cb_data->second->validate_functions.push_back(function);
7612 }
7613 for (auto buffer : buffers) {
7614 VkDeviceMemory mem;
7615 skip_call |=
7616 get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7617 std::function<VkBool32()> function = [=]() {
7618 set_memory_valid(my_data, mem, true);
7619 return VK_FALSE;
7620 };
7621 cb_data->second->validate_functions.push_back(function);
7622 }
7623 }
7624 return skip_call;
7625 }
7626 #endif
7627
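// The draw entry points share one pattern: bump the per-type draw count, validate bound
// pipeline/descriptor/dynamic state, verify the CB is inside a render pass (draws are
// only legal within one), and snapshot the currently bound vertex buffers for this draw.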
7628 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
7629 uint32_t firstVertex, uint32_t firstInstance) {
7630 VkBool32 skipCall = VK_FALSE;
7631 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7632 loader_platform_thread_lock_mutex(&globalLock);
7633 #if MTMERGESOURCE
7634 // MTMTODO : merge with code below
7635 skipCall = markStoreImagesAndBuffersAsWritten(commandBuffer);
7636 #endif
7637 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7638 if (pCB) {
7639 skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
7640 pCB->drawCount[DRAW]++;
7641 skipCall |= validate_draw_state(dev_data, pCB, VK_FALSE);
7642 // TODO : Need to pass commandBuffer as srcObj here
7643 skipCall |=
7644 log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7645 __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW]++);
7646 skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7647 if (VK_FALSE == skipCall) {
7648 updateResourceTrackingOnDraw(pCB);
7649 }
7650 skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");
7651 }
7652 loader_platform_thread_unlock_mutex(&globalLock);
7653 if (VK_FALSE == skipCall)
7654 dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
7655 }
7656
7657 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
7658 uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
7659 uint32_t firstInstance) {
7660 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7661 VkBool32 skipCall = VK_FALSE;
7662 loader_platform_thread_lock_mutex(&globalLock);
7663 #if MTMERGESOURCE
7664 // MTMTODO : merge with code below
7665 skipCall = markStoreImagesAndBuffersAsWritten(commandBuffer);
7666 #endif
7667 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7668 if (pCB) {
7669 skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
7670 pCB->drawCount[DRAW_INDEXED]++;
7671 skipCall |= validate_draw_state(dev_data, pCB, VK_TRUE);
7672 // TODO : Need to pass commandBuffer as srcObj here
7673 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7674 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7675 "vkCmdDrawIndexed() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
7676 skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7677 if (VK_FALSE == skipCall) {
7678 updateResourceTrackingOnDraw(pCB);
7679 }
7680 skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed");
7681 }
7682 loader_platform_thread_unlock_mutex(&globalLock);
7683 if (VK_FALSE == skipCall)
7684 dev_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset,
7685 firstInstance);
7686 }
7687
7688 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7689 vkCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7690 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7691 VkBool32 skipCall = VK_FALSE;
7692 loader_platform_thread_lock_mutex(&globalLock);
7693 #if MTMERGESOURCE
7694 VkDeviceMemory mem;
7695 // MTMTODO : merge with code below
7696 skipCall =
7697 get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7698 skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndirect");
7699 skipCall |= markStoreImagesAndBuffersAsWritten(commandBuffer);
7700 #endif
7701 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7702 if (pCB) {
7703 skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
7704 pCB->drawCount[DRAW_INDIRECT]++;
7705 skipCall |= validate_draw_state(dev_data, pCB, VK_FALSE);
7706 // TODO : Need to pass commandBuffer as srcObj here
7707 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7708 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7709 "vkCmdDrawIndirect() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
7710 skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7711 if (VK_FALSE == skipCall) {
7712 updateResourceTrackingOnDraw(pCB);
7713 }
7714 skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndirect");
7715 }
7716 loader_platform_thread_unlock_mutex(&globalLock);
7717 if (VK_FALSE == skipCall)
7718 dev_data->device_dispatch_table->CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
7719 }
7720
7721 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7722 vkCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7723 VkBool32 skipCall = VK_FALSE;
7724 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7725 loader_platform_thread_lock_mutex(&globalLock);
7726 #if MTMERGESOURCE
7727 VkDeviceMemory mem;
7728 // MTMTODO : merge with code below
7729 skipCall =
7730 get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7731 skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndexedIndirect");
7732 skipCall |= markStoreImagesAndBuffersAsWritten(commandBuffer);
7733 #endif
7734 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7735 if (pCB) {
7736 skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
7737 pCB->drawCount[DRAW_INDEXED_INDIRECT]++;
7738 loader_platform_thread_unlock_mutex(&globalLock);
7739 skipCall |= validate_draw_state(dev_data, pCB, VK_TRUE);
7740 loader_platform_thread_lock_mutex(&globalLock);
7741 // TODO : Need to pass commandBuffer as srcObj here
7742 skipCall |=
7743 log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7744 __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call #%" PRIu64 ", reporting DS state:",
7745 g_drawCount[DRAW_INDEXED_INDIRECT]++);
7746 skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7747 if (VK_FALSE == skipCall) {
7748 updateResourceTrackingOnDraw(pCB);
7749 }
7750 skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexedIndirect");
7751 }
7752 loader_platform_thread_unlock_mutex(&globalLock);
7753 if (VK_FALSE == skipCall)
7754 dev_data->device_dispatch_table->CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
7755 }
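// Dispatches, unlike draws, must be recorded outside an active render pass, hence the
// insideRenderPass() check, which reports an error if a render pass is active.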
7756
7757 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
7758 VkBool32 skipCall = VK_FALSE;
7759 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7760 loader_platform_thread_lock_mutex(&globalLock);
7761 #if MTMERGESOURCE
7762 skipCall = markStoreImagesAndBuffersAsWritten(commandBuffer);
7763 #endif
7764 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7765 if (pCB) {
7766 skipCall |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
7767 skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
7768 }
7769 loader_platform_thread_unlock_mutex(&globalLock);
7770 if (VK_FALSE == skipCall)
7771 dev_data->device_dispatch_table->CmdDispatch(commandBuffer, x, y, z);
7772 }
7773
7774 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7775 vkCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
7776 VkBool32 skipCall = VK_FALSE;
7777 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7778 loader_platform_thread_lock_mutex(&globalLock);
7779 #if MTMERGESOURCE
7780 VkDeviceMemory mem;
7781 skipCall =
7782 get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7783 skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDispatchIndirect");
7784 skipCall |= markStoreImagesAndBuffersAsWritten(commandBuffer);
7785 #endif
7786 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7787 if (pCB) {
7788 skipCall |= addCmd(dev_data, pCB, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
7789 skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatchIndirect");
7790 }
7791 loader_platform_thread_unlock_mutex(&globalLock);
7792 if (VK_FALSE == skipCall)
7793 dev_data->device_dispatch_table->CmdDispatchIndirect(commandBuffer, buffer, offset);
7794 }
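// Buffer copies must occur outside a render pass; source memory is validated as readable,
// destination memory is marked written, and both buffers are checked for the matching
// TRANSFER_SRC/TRANSFER_DST usage flags.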
7795
7796 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
7797 uint32_t regionCount, const VkBufferCopy *pRegions) {
7798 VkBool32 skipCall = VK_FALSE;
7799 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7800 loader_platform_thread_lock_mutex(&globalLock);
7801 #if MTMERGESOURCE
7802 VkDeviceMemory mem;
7803 auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7805 skipCall =
7806 get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7807 if (cb_data != dev_data->commandBufferMap.end()) {
7808 std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyBuffer()"); };
7809 cb_data->second->validate_functions.push_back(function);
7810 }
7811 skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBuffer");
7812 skipCall |=
7813 get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7814 if (cb_data != dev_data->commandBufferMap.end()) {
7815 std::function<VkBool32()> function = [=]() {
7816 set_memory_valid(dev_data, mem, true);
7817 return VK_FALSE;
7818 };
7819 cb_data->second->validate_functions.push_back(function);
7820 }
7821 skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBuffer");
7822 // Validate that SRC & DST buffers have correct usage flags set
7823 skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
7824 "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
7825 skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
7826 "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7827 #endif
7828 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7829 if (pCB) {
7830 skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
7831 skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBuffer");
7832 }
7833 loader_platform_thread_unlock_mutex(&globalLock);
7834 if (VK_FALSE == skipCall)
7835 dev_data->device_dispatch_table->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
7836 }
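// VerifySourceImageLayout()/VerifyDestImageLayout() compare the layout recorded for each
// affected subresource against the layout the command specifies, seeding the tracker on
// first sight; GENERAL is accepted with a performance warning, while any other layout
// besides TRANSFER_*_OPTIMAL is an error.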
7837
7838 VkBool32 VerifySourceImageLayout(VkCommandBuffer cmdBuffer, VkImage srcImage, VkImageSubresourceLayers subLayers,
7839 VkImageLayout srcImageLayout) {
    VkBool32 skip_call = VK_FALSE;

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
        uint32_t layer = i + subLayers.baseArrayLayer;
        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(pCB, srcImage, sub, node)) {
            SetLayout(pCB, srcImage, sub, {srcImageLayout, srcImageLayout});
            continue;
        }
        if (node.layout != srcImageLayout) {
            // TODO: Improve log message in the next pass
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose source layout is %s "
                                                                        "when the current layout is %s.",
                        string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
        }
    }
    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                 "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be "
                                                                       "TRANSFER_SRC_OPTIMAL or GENERAL.",
                                 string_VkImageLayout(srcImageLayout));
        }
    }
    return skip_call;
}

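// Counterpart of VerifySourceImageLayout for the destination image: the expected layout is
// TRANSFER_DST_OPTIMAL, with GENERAL again allowed as a performance warning.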
VkBool32 VerifyDestImageLayout(VkCommandBuffer cmdBuffer, VkImage destImage, VkImageSubresourceLayers subLayers,
                               VkImageLayout destImageLayout) {
    VkBool32 skip_call = VK_FALSE;

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
        uint32_t layer = i + subLayers.baseArrayLayer;
        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(pCB, destImage, sub, node)) {
            SetLayout(pCB, destImage, sub, {destImageLayout, destImageLayout});
            continue;
        }
        if (node.layout != destImageLayout) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy to an image whose dest layout is %s "
                                                                        "when the current layout is %s.",
                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
        }
    }
    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                 "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be "
                                                                       "TRANSFER_DST_OPTIMAL or GENERAL.",
                                 string_VkImageLayout(destImageLayout));
        }
    }
    return skip_call;
}

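// Intercept vkCmdCopyImage: track memory references for both images, defer source-validity and
// destination-valid marking to submit time, check TRANSFER usage flags, and verify the caller's
// src/dst layouts against the tracked per-subresource layouts for every region.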
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
               VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyImage()", srcImage); };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImage");
    skipCall |=
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true, dstImage);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImage");
    // Validate that src & dst images have correct usage flags set
    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGE, "vkCmdCopyImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImage");
        for (uint32_t i = 0; i < regionCount; ++i) {
            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].srcSubresource, srcImageLayout);
            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].dstSubresource, dstImageLayout);
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
                                                      regionCount, pRegions);
}

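// Intercept vkCmdBlitImage: the same memory-reference, usage-flag, and render-pass checks as
// vkCmdCopyImage (per-region layout verification is not performed here).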
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
               VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBlitImage()", srcImage); };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdBlitImage");
    skipCall |=
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true, dstImage);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdBlitImage");
    // Validate that src & dst images have correct usage flags set
    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_BLITIMAGE, "vkCmdBlitImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBlitImage");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
                                                      regionCount, pRegions, filter);
}

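// Intercept vkCmdCopyBufferToImage: mark the destination image memory valid, defer a validity
// check on the source buffer memory, check usage flags, and verify the destination image layout
// for each region.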
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
                                                                  VkImage dstImage, VkImageLayout dstImageLayout,
                                                                  uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true, dstImage);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBufferToImage");
    skipCall |=
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyBufferToImage()"); };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBufferToImage");
    // Validate that src buff & dst image have correct usage flags set
    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
                                            "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                           "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBufferToImage");
        for (uint32_t i = 0; i < regionCount; ++i) {
            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].imageSubresource, dstImageLayout);
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount,
                                                              pRegions);
}

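// Intercept vkCmdCopyImageToBuffer: the mirror of vkCmdCopyBufferToImage, validating the source
// image layout per region instead of the destination layout.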
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
                                                                  VkImageLayout srcImageLayout, VkBuffer dstBuffer,
                                                                  uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function =
            [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyImageToBuffer()", srcImage); };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImageToBuffer");
    skipCall |=
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImageToBuffer");
    // Validate that dst buff & src image have correct usage flags set
    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                           "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                            "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImageToBuffer");
        for (uint32_t i = 0; i < regionCount; ++i) {
            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].imageSubresource, srcImageLayout);
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount,
                                                              pRegions);
}

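// Intercept vkCmdUpdateBuffer: mark the destination buffer memory valid at submit time and
// require VK_BUFFER_USAGE_TRANSFER_DST_BIT, since the update is a transfer operation.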
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
                                                             VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall =
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdUpdateBuffer");
    // Validate that dst buff has correct usage flags set
    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                            "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdUpdateBuffer");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall =
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdFillBuffer");
    // Validate that dst buff has correct usage flags set
    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                            "vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_FILLBUFFER, "vkCmdFillBuffer()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdFillBuffer");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
}

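// Intercept vkCmdClearAttachments: warn when a full-extent clear is recorded before any draw
// (LOAD_OP_CLEAR would be cheaper), require an active render pass, and check that every cleared
// attachment is actually referenced by the active subpass.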
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
                                                                 const VkClearAttachment *pAttachments, uint32_t rectCount,
                                                                 const VkClearRect *pRects) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
        // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
        if (!hasDrawCmd(pCB) && (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
            (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
            // TODO : commandBuffer should be srcObj
            // There are times where an app needs to use ClearAttachments (generally when reusing a buffer inside of a render
            // pass). Can we make this warning more specific? I'd like to avoid triggering this test if we can tell it's a use
            // that must call CmdClearAttachments. Otherwise this seems more like a performance warning.
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
                                "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
                                " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
                                (uint64_t)(commandBuffer));
        }
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments");
    }

    // Validate that attachment is in reference list of active subpass
    if (pCB && pCB->activeRenderPass) {
        const VkRenderPassCreateInfo *pRPCI = dev_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo;
        const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];

        for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) {
            const VkClearAttachment *attachment = &pAttachments[attachment_idx];
            if (attachment->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
                VkBool32 found = VK_FALSE;
                for (uint32_t i = 0; i < pSD->colorAttachmentCount; i++) {
                    if (attachment->colorAttachment == pSD->pColorAttachments[i].attachment) {
                        found = VK_TRUE;
                        break;
                    }
                }
                if (VK_FALSE == found) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
                        "vkCmdClearAttachments() attachment index %d not found in attachment reference array of active subpass %d",
                        attachment->colorAttachment, pCB->activeSubpass);
                }
            } else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
                if (!pSD->pDepthStencilAttachment || // Says no DS will be used in active subpass
                    (pSD->pDepthStencilAttachment->attachment ==
                     VK_ATTACHMENT_UNUSED)) { // Says no DS will be used in active subpass

                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
                        "vkCmdClearAttachments() attachment index %d does not match depthStencilAttachment.attachment (%d) found "
                        "in active subpass %d",
                        attachment->colorAttachment,
                        (pSD->pDepthStencilAttachment) ? pSD->pDepthStencilAttachment->attachment : VK_ATTACHMENT_UNUSED,
                        pCB->activeSubpass);
                }
            }
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
                                                                VkImageLayout imageLayout, const VkClearColorValue *pColor,
                                                                uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true, image);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearColorImage");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearColorImage");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                            const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
                            const VkImageSubresourceRange *pRanges) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true, image);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearDepthStencilImage");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearDepthStencilImage");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount,
                                                                   pRanges);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
                  VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    VkDeviceMemory mem;
    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function =
            [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdResolveImage()", srcImage); };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdResolveImage");
    skipCall |=
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true, dstImage);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdResolveImage");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResolveImage");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
                                                         regionCount, pRegions);
}

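// Helper bound into a command buffer's eventUpdates list: when the command buffer is submitted,
// record the stage mask last set for 'event' on both the command buffer and the target queue.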
bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        pCB->eventToStageMap[event] = stageMask;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data != dev_data->queueMap.end()) {
        queue_data->second.eventToStageMap[event] = stageMask;
    }
    return false;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent");
        pCB->events.push_back(event);
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
        pCB->eventUpdates.push_back(eventUpdate);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdSetEvent(commandBuffer, event, stageMask);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent");
        pCB->events.push_back(event);
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
        pCB->eventUpdates.push_back(eventUpdate);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdResetEvent(commandBuffer, event, stageMask);
}

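// Walk the image memory barriers of a vkCmdWaitEvents/vkCmdPipelineBarrier call and apply each
// transition to the per-subresource layout tracker, reporting any barrier whose oldLayout does
// not match the layout currently on record.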
VkBool32 TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount, const VkImageMemoryBarrier *pImgMemBarriers) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    VkBool32 skip = VK_FALSE;
    uint32_t levelCount = 0;
    uint32_t layerCount = 0;

    for (uint32_t i = 0; i < memBarrierCount; ++i) {
        auto mem_barrier = &pImgMemBarriers[i];
        if (!mem_barrier)
            continue;
        // TODO: Do not iterate over every possibility - consolidate where possible
        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);

        for (uint32_t j = 0; j < levelCount; j++) {
            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
            for (uint32_t k = 0; k < layerCount; k++) {
                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
                VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
                IMAGE_CMD_BUF_LAYOUT_NODE node;
                if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
                    SetLayout(pCB, mem_barrier->image, sub, {mem_barrier->oldLayout, mem_barrier->newLayout});
                    continue;
                }
                if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
                    // TODO: Set memory invalid which is in mem_tracker currently
                } else if (node.layout != mem_barrier->oldLayout) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s "
                                                                                    "when the current layout is %s.",
                                    string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
                }
                SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
            }
        }
    }
    return skip;
}

// Print readable FlagBits in FlagMask
std::string string_VkAccessFlags(VkAccessFlags accessMask) {
    std::string result;
    std::string separator;

    if (accessMask == 0) {
        result = "[None]";
    } else {
        result = "[";
        for (auto i = 0; i < 32; i++) {
            if (accessMask & (1 << i)) {
                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1 << i));
                separator = " | ";
            }
        }
        result = result + "]";
    }
    return result;
}

// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
// If required_bit is zero, accessMask must have at least one of 'optional_bits' set.
// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
VkBool32 ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
                          const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits, const char *type) {
    VkBool32 skip_call = VK_FALSE;

    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
        // Use the bitwise complement (~) so that any bit outside required_bit|optional_bits is caught
        if (accessMask & ~(required_bit | optional_bits)) {
            // TODO: Verify against Valid Use
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
        }
    } else {
        if (!required_bit) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d "
                                                                  "%s when layout is %s, unless the app has previously added a "
                                                                  "barrier for this transition.",
                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
        } else {
            std::string opt_bits;
            if (optional_bits != 0) {
                std::stringstream ss;
                ss << optional_bits;
                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
            }
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when "
                                                                  "layout is %s, unless the app has previously added a barrier for "
                                                                  "this transition.",
                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
        }
    }
    return skip_call;
}

VkBool32 ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
                                     const VkImageLayout &layout, const char *type) {
    VkBool32 skip_call = VK_FALSE;
    switch (layout) {
    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_PREINITIALIZED: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_UNDEFINED: {
        if (accessMask != 0) {
            // TODO: Verify against Valid Use section spec
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
        }
        break;
    }
    case VK_IMAGE_LAYOUT_GENERAL:
    default: { break; }
    }
    return skip_call;
}

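// Validate the memory, buffer, and image barriers recorded by vkCmdWaitEvents or
// vkCmdPipelineBarrier: no plain memory barriers inside a subpass without a self-dependency,
// queue family indices consistent with the resource's sharing mode, access masks compatible
// with the declared layouts, subresource ranges within the image, and buffer ranges in bounds.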
VkBool32 ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
                          const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
                          const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
                          const VkImageMemoryBarrier *pImageMemBarriers) {
    VkBool32 skip_call = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    if (pCB && pCB->activeRenderPass && memBarrierCount) {
        if (!dev_data->renderPassMap[pCB->activeRenderPass]->hasSelfDependency[pCB->activeSubpass]) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
                                                                  "with no self dependency specified.",
                                 funcName, pCB->activeSubpass);
        }
    }
    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
        auto mem_barrier = &pImageMemBarriers[i];
        auto image_data = dev_data->imageMap.find(mem_barrier->image);
        if (image_data != dev_data->imageMap.end()) {
            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
            if (image_data->second.createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
                // srcQueueFamilyIndex and dstQueueFamilyIndex must both be VK_QUEUE_FAMILY_IGNORED
                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                         "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
                                         "VK_SHARING_MODE_CONCURRENT. Src and dst "
                                         "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
                }
            } else {
                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
                // or both be a valid queue family
                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
                    (src_q_f_index != dst_q_f_index)) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
                                                                     "must be.",
                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
                           ((src_q_f_index >= dev_data->physDevProperties.queue_family_properties.size()) ||
                            (dst_q_f_index >= dev_data->physDevProperties.queue_family_properties.size()))) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                         "%s: Image 0x%" PRIx64 " was created with sharingMode "
                                         "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
                                         " or dstQueueFamilyIndex %d is greater than the " PRINTF_SIZE_T_SPECIFIER
                                         " queueFamilies created for this device.",
                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index,
                                         dst_q_f_index, dev_data->physDevProperties.queue_family_properties.size());
                }
            }
        }

        if (mem_barrier) {
            skip_call |=
                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
            skip_call |=
                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image Layout cannot be transitioned to UNDEFINED or "
                                                             "PREINITIALIZED.",
                            funcName);
            }
            auto image_data = dev_data->imageMap.find(mem_barrier->image);
            VkFormat format;
            uint32_t arrayLayers, mipLevels;
            bool imageFound = false;
            if (image_data != dev_data->imageMap.end()) {
                format = image_data->second.createInfo.format;
                arrayLayers = image_data->second.createInfo.arrayLayers;
                mipLevels = image_data->second.createInfo.mipLevels;
                imageFound = true;
            } else if (dev_data->device_extensions.wsi_enabled) {
                auto imageswap_data = dev_data->device_extensions.imageToSwapchainMap.find(mem_barrier->image);
                if (imageswap_data != dev_data->device_extensions.imageToSwapchainMap.end()) {
                    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(imageswap_data->second);
                    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
                        format = swapchain_data->second->createInfo.imageFormat;
                        arrayLayers = swapchain_data->second->createInfo.imageArrayLayers;
                        mipLevels = 1;
                        imageFound = true;
                    }
                }
            }
            if (imageFound) {
                if (vk_format_is_depth_and_stencil(format) &&
                    (!(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) ||
                     !(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT))) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image is a depth and stencil format and thus must "
                                                                 "have both VK_IMAGE_ASPECT_DEPTH_BIT and "
                                                                 "VK_IMAGE_ASPECT_STENCIL_BIT set.",
                                funcName);
                }
                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
                                     ? 1
                                     : mem_barrier->subresourceRange.layerCount;
                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the "
                                                                 "baseArrayLayer (%d) and layerCount (%d) be less "
                                                                 "than or equal to the total number of layers (%d).",
                                funcName, mem_barrier->subresourceRange.baseArrayLayer, mem_barrier->subresourceRange.layerCount,
                                arrayLayers);
                }
                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
                                     ? 1
                                     : mem_barrier->subresourceRange.levelCount;
                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the baseMipLevel "
                                                                 "(%d) and levelCount (%d) be less than or equal to "
                                                                 "the total number of levels (%d).",
                                funcName, mem_barrier->subresourceRange.baseMipLevel, mem_barrier->subresourceRange.levelCount,
                                mipLevels);
                }
            }
        }
    }
    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
        auto mem_barrier = &pBufferMemBarriers[i];
        if (pCB && pCB->activeRenderPass) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
        }
        if (!mem_barrier)
            continue;

        // Validate buffer barrier queue family indices
        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
             mem_barrier->srcQueueFamilyIndex >= dev_data->physDevProperties.queue_family_properties.size()) ||
            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
             mem_barrier->dstQueueFamilyIndex >= dev_data->physDevProperties.queue_family_properties.size())) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                 "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater "
                                 "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
                                 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                                 dev_data->physDevProperties.queue_family_properties.size());
        }

        auto buffer_data = dev_data->bufferMap.find(mem_barrier->buffer);
        if (buffer_data != dev_data->bufferMap.end()) {
            // Only dereference the iterator after confirming the buffer was found in the map
            uint64_t buffer_size =
                buffer_data->second.create_info ? reinterpret_cast<uint64_t &>(buffer_data->second.create_info->size) : 0;
            if (mem_barrier->offset >= buffer_size) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64
                                                             " which is not less than total size %" PRIu64 ".",
                            funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                            reinterpret_cast<const uint64_t &>(mem_barrier->offset), buffer_size);
            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                     "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64 " and size %" PRIu64
                                     " whose sum is greater than total size %" PRIu64 ".",
                                     funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                                     reinterpret_cast<const uint64_t &>(mem_barrier->offset),
                                     reinterpret_cast<const uint64_t &>(mem_barrier->size), buffer_size);
            }
        }
    }
    return skip_call;
}

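// Deferred check run at queue submit: the stage mask passed to vkCmdWaitEvents must equal the
// bitwise OR of the stage masks used when each waited event was last set (via vkCmdSetEvent on
// this queue, or vkSetEvent from the host).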
bool validateEventStageMask(VkQueue queue, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask) {
    bool skip_call = false;
    VkPipelineStageFlags stageMask = 0;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    for (uint32_t i = 0; i < eventCount; ++i) {
        auto queue_data = dev_data->queueMap.find(queue);
        if (queue_data == dev_data->queueMap.end())
            return false;
        auto event_data = queue_data->second.eventToStageMap.find(pEvents[i]);
        if (event_data != queue_data->second.eventToStageMap.end()) {
            stageMask |= event_data->second;
        } else {
            auto global_event_data = dev_data->eventMap.find(pEvents[i]);
            if (global_event_data == dev_data->eventMap.end()) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                                     reinterpret_cast<const uint64_t &>(pEvents[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
                                     reinterpret_cast<const uint64_t &>(pEvents[i]));
            } else {
                stageMask |= global_event_data->second.stageMask;
            }
        }
    }
    if (sourceStageMask != stageMask) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_FENCE, "DS",
                    "Submitting cmdbuffer with call to vkCmdWaitEvents using srcStageMask 0x%x which must be the bitwise OR of the "
                    "stageMask parameters used in calls to vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if used with vkSetEvent.",
                    sourceStageMask);
    }
    return skip_call;
}

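// Intercept vkCmdWaitEvents: record the waited events, queue the submit-time stage-mask check
// above, apply image layout transitions, and validate all barrier parameters.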
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
                VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        for (uint32_t i = 0; i < eventCount; ++i) {
            pCB->waitedEvents.push_back(pEvents[i]);
            pCB->events.push_back(pEvents[i]);
        }
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(validateEventStageMask, std::placeholders::_1, eventCount, pEvents, sourceStageMask);
        pCB->eventUpdates.push_back(eventUpdate);
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
        }
        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        skipCall |=
            ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
                                                       memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                                       pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
                     VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                     uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                     uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        skipCall |=
            ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
                                                            memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                                            pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        pCB->activeQueries.insert(query);
        if (!pCB->startedQueries.count(query)) {
            pCB->startedQueries.insert(query);
        }
        skipCall |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdBeginQuery(commandBuffer, queryPool, slot, flags);
}

vkCmdEndQuery(VkCommandBuffer commandBuffer,VkQueryPool queryPool,uint32_t slot)8817 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
8818 VkBool32 skipCall = VK_FALSE;
8819 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8820 loader_platform_thread_lock_mutex(&globalLock);
8821 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8822 if (pCB) {
8823 QueryObject query = {queryPool, slot};
8824 if (!pCB->activeQueries.count(query)) {
8825 skipCall |=
8826 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8827 DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool %" PRIu64 ", index %d",
8828 (uint64_t)(queryPool), slot);
8829 } else {
8830 pCB->activeQueries.erase(query);
8831 }
8832 pCB->queryToStateMap[query] = 1;
8833 if (pCB->state == CB_RECORDING) {
8834             skipCall |= addCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
8835 } else {
8836 skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
8837 }
8838 }
8839 loader_platform_thread_unlock_mutex(&globalLock);
8840 if (VK_FALSE == skipCall)
8841 dev_data->device_dispatch_table->CmdEndQuery(commandBuffer, queryPool, slot);
8842 }
8843
8844 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8845 vkCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
8846 VkBool32 skipCall = VK_FALSE;
8847 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8848 loader_platform_thread_lock_mutex(&globalLock);
8849 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8850 if (pCB) {
8851 for (uint32_t i = 0; i < queryCount; i++) {
8852 QueryObject query = {queryPool, firstQuery + i};
8853 pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
8854 pCB->queryToStateMap[query] = 0;
8855 }
8856 if (pCB->state == CB_RECORDING) {
8857             skipCall |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
8858 } else {
8859 skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
8860 }
8861         skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool");
8862 }
8863 loader_platform_thread_unlock_mutex(&globalLock);
8864 if (VK_FALSE == skipCall)
8865 dev_data->device_dispatch_table->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
8866 }
8867
8868 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8869 vkCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
8870 VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
8871 VkBool32 skipCall = VK_FALSE;
8872 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8873 loader_platform_thread_lock_mutex(&globalLock);
8874 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8875 #if MTMERGESOURCE
8876 VkDeviceMemory mem;
8877 auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8878 skipCall |=
8879 get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8880 if (cb_data != dev_data->commandBufferMap.end()) {
8881 std::function<VkBool32()> function = [=]() {
8882 set_memory_valid(dev_data, mem, true);
8883 return VK_FALSE;
8884 };
8885 cb_data->second->validate_functions.push_back(function);
8886 }
8887 skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyQueryPoolResults");
8888 // Validate that DST buffer has correct usage flags set
8889 skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8890 "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8891 #endif
8892 if (pCB) {
8893 for (uint32_t i = 0; i < queryCount; i++) {
8894 QueryObject query = {queryPool, firstQuery + i};
8895 if (!pCB->queryToStateMap[query]) {
8896 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8897 __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
8898 "Requesting a copy from query to buffer with invalid query: queryPool %" PRIu64 ", index %d",
8899 (uint64_t)(queryPool), firstQuery + i);
8900 }
8901 }
8902 if (pCB->state == CB_RECORDING) {
8903 skipCall |= addCmd(dev_data, pCB, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
8904 } else {
8905 skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
8906 }
8907 skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyQueryPoolResults");
8908 }
8909 loader_platform_thread_unlock_mutex(&globalLock);
8910 if (VK_FALSE == skipCall)
8911 dev_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer,
8912 dstOffset, stride, flags);
8913 }
8914
8915 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
8916 VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
8917 const void *pValues) {
8918 bool skipCall = false;
8919 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8920 loader_platform_thread_lock_mutex(&globalLock);
8921 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8922 if (pCB) {
8923 if (pCB->state == CB_RECORDING) {
8924 skipCall |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
8925 } else {
8926 skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
8927 }
8928 }
8929 if ((offset + size) > dev_data->physDevProperties.properties.limits.maxPushConstantsSize) {
8930 skipCall |= validatePushConstantSize(dev_data, offset, size, "vkCmdPushConstants()");
8931 }
8932 // TODO : Add warning if push constant update doesn't align with range
8933 loader_platform_thread_unlock_mutex(&globalLock);
8934 if (!skipCall)
8935 dev_data->device_dispatch_table->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
8936 }
8937
8938 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8939 vkCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
8940 VkBool32 skipCall = VK_FALSE;
8941 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8942 loader_platform_thread_lock_mutex(&globalLock);
8943 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8944 if (pCB) {
8945 QueryObject query = {queryPool, slot};
8946 pCB->queryToStateMap[query] = 1;
8947 if (pCB->state == CB_RECORDING) {
8948 skipCall |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
8949 } else {
8950 skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
8951 }
8952 }
8953 loader_platform_thread_unlock_mutex(&globalLock);
8954 if (VK_FALSE == skipCall)
8955 dev_data->device_dispatch_table->CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
8956 }
8957
8958 VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
8959 const VkAllocationCallbacks *pAllocator,
8960 VkFramebuffer *pFramebuffer) {
8961 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
8962 VkResult result = dev_data->device_dispatch_table->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
8963 if (VK_SUCCESS == result) {
8964 // Shadow create info and store in map
8965 VkFramebufferCreateInfo *localFBCI = new VkFramebufferCreateInfo(*pCreateInfo);
8966 if (pCreateInfo->pAttachments) {
8967 localFBCI->pAttachments = new VkImageView[localFBCI->attachmentCount];
8968 memcpy((void *)localFBCI->pAttachments, pCreateInfo->pAttachments, localFBCI->attachmentCount * sizeof(VkImageView));
8969 }
8970 FRAMEBUFFER_NODE fbNode = {};
8971 fbNode.createInfo = *localFBCI;
8972 std::pair<VkFramebuffer, FRAMEBUFFER_NODE> fbPair(*pFramebuffer, fbNode);
8973 loader_platform_thread_lock_mutex(&globalLock);
8974 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8975 VkImageView view = pCreateInfo->pAttachments[i];
8976 auto view_data = dev_data->imageViewMap.find(view);
8977 if (view_data == dev_data->imageViewMap.end()) {
8978 continue;
8979 }
8980 MT_FB_ATTACHMENT_INFO fb_info;
8981 get_mem_binding_from_object(dev_data, device, (uint64_t)(view_data->second.image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
8982 &fb_info.mem);
8983 fb_info.image = view_data->second.image;
8984 fbPair.second.attachments.push_back(fb_info);
8985 }
8986 dev_data->frameBufferMap.insert(fbPair);
8987 loader_platform_thread_unlock_mutex(&globalLock);
8988 }
8989 return result;
8990 }
8991
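// FindDependency: depth-first search backwards through the subpass DAG. Starting from subpass
// 'index', walk the 'prev' edges and return VK_TRUE if subpass 'dependent' is reachable, i.e.
// some chain of declared dependencies already orders the two subpasses. 'processed_nodes'
// memoizes visited subpasses so shared ancestors are not re-walked.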
8992 VkBool32 FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
8993 std::unordered_set<uint32_t> &processed_nodes) {
8994     // If this node has already been checked, no dependency path was found through it, so return false.
8995 if (processed_nodes.count(index))
8996 return VK_FALSE;
8997 processed_nodes.insert(index);
8998 const DAGNode &node = subpass_to_node[index];
8999 // Look for a dependency path. If one exists return true else recurse on the previous nodes.
9000 if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
9001 for (auto elem : node.prev) {
9002 if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
9003 return VK_TRUE;
9004 }
9005 } else {
9006 return VK_TRUE;
9007 }
9008 return VK_FALSE;
9009 }
9010
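// CheckDependencyExists: for every subpass in 'dependent_subpasses' that shares an attachment
// with 'subpass', require a direct VkSubpassDependency edge in either direction. If only a
// transitive path exists (found via FindDependency), or no path exists at all, an error is
// logged; only the latter case clears the result.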
9011 VkBool32 CheckDependencyExists(const layer_data *my_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
9012 const std::vector<DAGNode> &subpass_to_node, VkBool32 &skip_call) {
9013 VkBool32 result = VK_TRUE;
9014 // Loop through all subpasses that share the same attachment and make sure a dependency exists
9015 for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
9016 if (subpass == dependent_subpasses[k])
9017 continue;
9018 const DAGNode &node = subpass_to_node[subpass];
9019 // Check for a specified dependency between the two nodes. If one exists we are done.
9020 auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
9021 auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
9022 if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
9023             // If no dependency exists, an implicit one still might. If so, warn; if not, report an error.
9024 std::unordered_set<uint32_t> processed_nodes;
9025 if (FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
9026 FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes)) {
9027 // TODO: Verify against Valid Use section of spec
9028 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9029 __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9030 "A dependency between subpasses %d and %d must exist but only an implicit one is specified.",
9031 subpass, dependent_subpasses[k]);
9032 } else {
9033 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9034 __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9035 "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
9036 dependent_subpasses[k]);
9037 result = VK_FALSE;
9038 }
9039 }
9040 }
9041 return result;
9042 }
9043
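// CheckPreserved: recursively determine whether 'attachment' is written by subpass 'index' or
// any of its predecessors. If a predecessor wrote it and this subpass (depth > 0) neither
// writes it nor lists it in pPreserveAttachments, an error is logged, since the contents a
// later subpass reads would otherwise be undefined.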
9044 VkBool32 CheckPreserved(const layer_data *my_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
9045 const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, VkBool32 &skip_call) {
9046 const DAGNode &node = subpass_to_node[index];
9047 // If this node writes to the attachment return true as next nodes need to preserve the attachment.
9048 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
9049 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9050 if (attachment == subpass.pColorAttachments[j].attachment)
9051 return VK_TRUE;
9052 }
9053 if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9054 if (attachment == subpass.pDepthStencilAttachment->attachment)
9055 return VK_TRUE;
9056 }
9057 VkBool32 result = VK_FALSE;
9058 // Loop through previous nodes and see if any of them write to the attachment.
9059 for (auto elem : node.prev) {
9060 result |= CheckPreserved(my_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
9061 }
9062     // If the attachment was written to by a previous node, then this node needs to preserve it.
9063 if (result && depth > 0) {
9064 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
9065 VkBool32 has_preserved = VK_FALSE;
9066 for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
9067 if (subpass.pPreserveAttachments[j] == attachment) {
9068 has_preserved = VK_TRUE;
9069 break;
9070 }
9071 }
9072 if (has_preserved == VK_FALSE) {
9073 skip_call |=
9074 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9075 DRAWSTATE_INVALID_RENDERPASS, "DS",
9076 "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
9077 }
9078 }
9079 return result;
9080 }
9081
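// Generic interval-overlap test shared by the subresource and memory checks below. Ranges are
// treated as half-open, [offset, offset + size): two such ranges overlap exactly when each one
// begins before the other one ends.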
9082 template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
9083     return ((offset1 < (offset2 + size2)) &&
9084             (offset2 < (offset1 + size1)));
9085 }
9086
9087 bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
9088 return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
9089 isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
9090 }
9091
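// ValidateDependencies: run at vkCmdBeginRenderPass time, once both the render pass and the
// framebuffer are known. Three phases: (1) build 'overlapping_attachments' -- two attachments
// alias if they share a VkImageView, overlapping subresources of one image, or overlapping
// ranges of one memory binding, and aliasing attachments must set
// VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT; (2) record which subpasses read and which write
// each attachment, counting an aliased attachment as the same one; (3) for every read/write
// or write/write pairing on the same attachment, require an explicit subpass dependency via
// CheckDependencyExists and the proper preserve-attachment entries via CheckPreserved.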
9092 VkBool32 ValidateDependencies(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin,
9093 const std::vector<DAGNode> &subpass_to_node) {
9094 VkBool32 skip_call = VK_FALSE;
9095 const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
9096 const VkRenderPassCreateInfo *pCreateInfo = my_data->renderPassMap.at(pRenderPassBegin->renderPass)->pCreateInfo;
9097 std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
9098 std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
9099 std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
9100 // Find overlapping attachments
9101 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9102 for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
9103 VkImageView viewi = pFramebufferInfo->pAttachments[i];
9104 VkImageView viewj = pFramebufferInfo->pAttachments[j];
9105 if (viewi == viewj) {
9106 overlapping_attachments[i].push_back(j);
9107 overlapping_attachments[j].push_back(i);
9108 continue;
9109 }
9110 auto view_data_i = my_data->imageViewMap.find(viewi);
9111 auto view_data_j = my_data->imageViewMap.find(viewj);
9112 if (view_data_i == my_data->imageViewMap.end() || view_data_j == my_data->imageViewMap.end()) {
9113 continue;
9114 }
9115 if (view_data_i->second.image == view_data_j->second.image &&
9116 isRegionOverlapping(view_data_i->second.subresourceRange, view_data_j->second.subresourceRange)) {
9117 overlapping_attachments[i].push_back(j);
9118 overlapping_attachments[j].push_back(i);
9119 continue;
9120 }
9121 auto image_data_i = my_data->imageMap.find(view_data_i->second.image);
9122 auto image_data_j = my_data->imageMap.find(view_data_j->second.image);
9123 if (image_data_i == my_data->imageMap.end() || image_data_j == my_data->imageMap.end()) {
9124 continue;
9125 }
9126 if (image_data_i->second.mem == image_data_j->second.mem &&
9127 isRangeOverlapping(image_data_i->second.memOffset, image_data_i->second.memSize, image_data_j->second.memOffset,
9128 image_data_j->second.memSize)) {
9129 overlapping_attachments[i].push_back(j);
9130 overlapping_attachments[j].push_back(i);
9131 }
9132 }
9133 }
9134 for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
9135 uint32_t attachment = i;
9136 for (auto other_attachment : overlapping_attachments[i]) {
9137 if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
9138 skip_call |=
9139 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9140 DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
9141 "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
9142 attachment, other_attachment);
9143 }
9144 if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
9145 skip_call |=
9146 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9147 DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
9148 "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
9149 other_attachment, attachment);
9150 }
9151 }
9152 }
9153     // For each attachment, find the subpasses that use it.
9154 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9155 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9156 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9157 uint32_t attachment = subpass.pInputAttachments[j].attachment;
9158 input_attachment_to_subpass[attachment].push_back(i);
9159 for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9160 input_attachment_to_subpass[overlapping_attachment].push_back(i);
9161 }
9162 }
9163 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9164 uint32_t attachment = subpass.pColorAttachments[j].attachment;
9165 output_attachment_to_subpass[attachment].push_back(i);
9166 for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9167 output_attachment_to_subpass[overlapping_attachment].push_back(i);
9168 }
9169 }
9170 if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9171 uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9172 output_attachment_to_subpass[attachment].push_back(i);
9173 for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9174 output_attachment_to_subpass[overlapping_attachment].push_back(i);
9175 }
9176 }
9177 }
9178     // Where a dependency is needed, make sure one exists
9179 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9180 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9181 // If the attachment is an input then all subpasses that output must have a dependency relationship
9182 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9183 const uint32_t &attachment = subpass.pInputAttachments[j].attachment;
9184 CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9185 }
9186 // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
9187 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9188 const uint32_t &attachment = subpass.pColorAttachments[j].attachment;
9189 CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9190 CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9191 }
9192 if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9193 const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
9194 CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9195 CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9196 }
9197 }
9198     // Loop through implicit dependencies: if this pass reads an attachment, make sure it is preserved in every pass
9199     // after the one that wrote it.
9200 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9201 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9202 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9203 CheckPreserved(my_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
9204 }
9205 }
9206 return skip_call;
9207 }
9208
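// ValidateLayouts: sanity-check per-subpass attachment layouts at render pass creation time.
// Input attachments are expected in a READ_ONLY_OPTIMAL layout, color attachments in
// COLOR_ATTACHMENT_OPTIMAL, and depth/stencil attachments in DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
// GENERAL is accepted with a performance warning, anything else is an error.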
9209 VkBool32 ValidateLayouts(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
9210 VkBool32 skip = VK_FALSE;
9211
9212 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9213 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9214 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9215 if (subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
9216 subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
9217 if (subpass.pInputAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
9218 // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
9219 skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9220 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9221 "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
9222 } else {
9223 skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9224 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9225 "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
9226 string_VkImageLayout(subpass.pInputAttachments[j].layout));
9227 }
9228 }
9229 }
9230 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9231 if (subpass.pColorAttachments[j].layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
9232 if (subpass.pColorAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
9233 // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
9234 skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9235 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9236 "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
9237 } else {
9238 skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9239 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9240 "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
9241 string_VkImageLayout(subpass.pColorAttachments[j].layout));
9242 }
9243 }
9244 }
9245 if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
9246 if (subpass.pDepthStencilAttachment->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
9247 if (subpass.pDepthStencilAttachment->layout == VK_IMAGE_LAYOUT_GENERAL) {
9248 // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
9249 skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9250 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9251 "Layout for depth attachment is GENERAL but should be DEPTH_STENCIL_ATTACHMENT_OPTIMAL.");
9252 } else {
9253 skip |=
9254 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9255 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9256 "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL or GENERAL.",
9257 string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
9258 }
9259 }
9260 }
9261 }
9262 return skip;
9263 }
9264
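// CreatePassDAG: convert pCreateInfo->pDependencies into an adjacency list over the subpasses
// (a DAGNode with 'prev'/'next' edges per subpass) and flag self-dependencies. Backward edges
// between two non-external subpasses and external-to-external dependencies are rejected.
// Illustrative shape, for a hypothetical pass with dependencies 0->1 and 1->2:
//     subpass_to_node[1].prev == {0} and subpass_to_node[1].next == {2}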
9265 VkBool32 CreatePassDAG(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
9266 std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
9267 VkBool32 skip_call = VK_FALSE;
9268 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9269 DAGNode &subpass_node = subpass_to_node[i];
9270 subpass_node.pass = i;
9271 }
9272 for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
9273 const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
9274 if (dependency.srcSubpass > dependency.dstSubpass && dependency.srcSubpass != VK_SUBPASS_EXTERNAL &&
9275 dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
9276 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9277 DRAWSTATE_INVALID_RENDERPASS, "DS",
9278                                  "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
9279 } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL && dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
9280 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9281 DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
9282 } else if (dependency.srcSubpass == dependency.dstSubpass) {
9283 has_self_dependency[dependency.srcSubpass] = true;
9284 }
9285 if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
9286 subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
9287 }
9288 if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL) {
9289 subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
9290 }
9291 }
9292 return skip_call;
9293 }
9294
9295
9296 VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
9297 const VkAllocationCallbacks *pAllocator,
9298 VkShaderModule *pShaderModule) {
9299 layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9300 VkBool32 skip_call = VK_FALSE;
9301 if (!shader_is_spirv(pCreateInfo)) {
9302 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
9303 /* dev */ 0, __LINE__, SHADER_CHECKER_NON_SPIRV_SHADER, "SC", "Shader is not SPIR-V");
9304 }
9305
9306 if (VK_FALSE != skip_call)
9307 return VK_ERROR_VALIDATION_FAILED_EXT;
9308
9309 VkResult res = my_data->device_dispatch_table->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
9310
9311 if (res == VK_SUCCESS) {
9312 loader_platform_thread_lock_mutex(&globalLock);
9313 my_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
9314 loader_platform_thread_unlock_mutex(&globalLock);
9315 }
9316 return res;
9317 }
9318
9319 VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
9320 const VkAllocationCallbacks *pAllocator,
9321 VkRenderPass *pRenderPass) {
9322 VkBool32 skip_call = VK_FALSE;
9323 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9324 loader_platform_thread_lock_mutex(&globalLock);
9325 // Create DAG
9326 std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
9327 std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
9328 skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);
9329 // Validate
9330 skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
9331     loader_platform_thread_unlock_mutex(&globalLock);
9332     if (VK_FALSE != skip_call) {
9333         return VK_ERROR_VALIDATION_FAILED_EXT;
9334     }
9335 VkResult result = dev_data->device_dispatch_table->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
9336 if (VK_SUCCESS == result) {
9337 loader_platform_thread_lock_mutex(&globalLock);
9338 // TODOSC : Merge in tracking of renderpass from shader_checker
9339 // Shadow create info and store in map
9340 VkRenderPassCreateInfo *localRPCI = new VkRenderPassCreateInfo(*pCreateInfo);
9341 if (pCreateInfo->pAttachments) {
9342 localRPCI->pAttachments = new VkAttachmentDescription[localRPCI->attachmentCount];
9343 memcpy((void *)localRPCI->pAttachments, pCreateInfo->pAttachments,
9344 localRPCI->attachmentCount * sizeof(VkAttachmentDescription));
9345 }
9346 if (pCreateInfo->pSubpasses) {
9347 localRPCI->pSubpasses = new VkSubpassDescription[localRPCI->subpassCount];
9348 memcpy((void *)localRPCI->pSubpasses, pCreateInfo->pSubpasses, localRPCI->subpassCount * sizeof(VkSubpassDescription));
9349
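            // Each subpass's attachment references are re-pointed into one contiguous
            // allocation per subpass (input, color, resolve, depth/stencil, preserve, in that
            // order). deleteRenderPasses() relies on this layout: it frees only the first
            // non-null attachment array of each subpass.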
9350 for (uint32_t i = 0; i < localRPCI->subpassCount; i++) {
9351 VkSubpassDescription *subpass = (VkSubpassDescription *)&localRPCI->pSubpasses[i];
9352 const uint32_t attachmentCount = subpass->inputAttachmentCount +
9353 subpass->colorAttachmentCount * (1 + (subpass->pResolveAttachments ? 1 : 0)) +
9354 ((subpass->pDepthStencilAttachment) ? 1 : 0) + subpass->preserveAttachmentCount;
9355 VkAttachmentReference *attachments = new VkAttachmentReference[attachmentCount];
9356
9357 memcpy(attachments, subpass->pInputAttachments, sizeof(attachments[0]) * subpass->inputAttachmentCount);
9358 subpass->pInputAttachments = attachments;
9359 attachments += subpass->inputAttachmentCount;
9360
9361 memcpy(attachments, subpass->pColorAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
9362 subpass->pColorAttachments = attachments;
9363 attachments += subpass->colorAttachmentCount;
9364
9365 if (subpass->pResolveAttachments) {
9366 memcpy(attachments, subpass->pResolveAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
9367 subpass->pResolveAttachments = attachments;
9368 attachments += subpass->colorAttachmentCount;
9369 }
9370
9371 if (subpass->pDepthStencilAttachment) {
9372 memcpy(attachments, subpass->pDepthStencilAttachment, sizeof(attachments[0]) * 1);
9373 subpass->pDepthStencilAttachment = attachments;
9374 attachments += 1;
9375 }
9376
9377 memcpy(attachments, subpass->pPreserveAttachments, sizeof(attachments[0]) * subpass->preserveAttachmentCount);
9378 subpass->pPreserveAttachments = &attachments->attachment;
9379 }
9380 }
9381 if (pCreateInfo->pDependencies) {
9382 localRPCI->pDependencies = new VkSubpassDependency[localRPCI->dependencyCount];
9383 memcpy((void *)localRPCI->pDependencies, pCreateInfo->pDependencies,
9384 localRPCI->dependencyCount * sizeof(VkSubpassDependency));
9385 }
9386 dev_data->renderPassMap[*pRenderPass] = new RENDER_PASS_NODE(localRPCI);
9387 dev_data->renderPassMap[*pRenderPass]->hasSelfDependency = has_self_dependency;
9388 dev_data->renderPassMap[*pRenderPass]->subpassToNode = subpass_to_node;
9389 #if MTMERGESOURCE
9390 // MTMTODO : Merge with code from above to eliminate duplication
9391 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9392 VkAttachmentDescription desc = pCreateInfo->pAttachments[i];
9393 MT_PASS_ATTACHMENT_INFO pass_info;
9394 pass_info.load_op = desc.loadOp;
9395 pass_info.store_op = desc.storeOp;
9396 pass_info.attachment = i;
9397 dev_data->renderPassMap[*pRenderPass]->attachments.push_back(pass_info);
9398 }
9399 // TODO: Maybe fill list and then copy instead of locking
9400 std::unordered_map<uint32_t, bool> &attachment_first_read = dev_data->renderPassMap[*pRenderPass]->attachment_first_read;
9401 std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout =
9402 dev_data->renderPassMap[*pRenderPass]->attachment_first_layout;
9403 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9404 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9405 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9406 uint32_t attachment = subpass.pInputAttachments[j].attachment;
9407 if (attachment_first_read.count(attachment))
9408 continue;
9409 attachment_first_read.insert(std::make_pair(attachment, true));
9410 attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
9411 }
9412 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9413 uint32_t attachment = subpass.pColorAttachments[j].attachment;
9414 if (attachment_first_read.count(attachment))
9415 continue;
9416 attachment_first_read.insert(std::make_pair(attachment, false));
9417 attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
9418 }
9419 if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9420 uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9421 if (attachment_first_read.count(attachment))
9422 continue;
9423 attachment_first_read.insert(std::make_pair(attachment, false));
9424 attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
9425 }
9426 }
9427 #endif
9428 loader_platform_thread_unlock_mutex(&globalLock);
9429 }
9430 return result;
9431 }
9432 // Free the renderpass shadow
9433 static void deleteRenderPasses(layer_data *my_data) {
9434 if (my_data->renderPassMap.size() <= 0)
9435 return;
9436 for (auto ii = my_data->renderPassMap.begin(); ii != my_data->renderPassMap.end(); ++ii) {
9437 const VkRenderPassCreateInfo *pRenderPassInfo = (*ii).second->pCreateInfo;
9438 delete[] pRenderPassInfo->pAttachments;
9439 if (pRenderPassInfo->pSubpasses) {
9440 for (uint32_t i = 0; i < pRenderPassInfo->subpassCount; ++i) {
9441                 // Attachments are all allocated in a block, so just need to
9442 // find the first non-null one to delete
9443 if (pRenderPassInfo->pSubpasses[i].pInputAttachments) {
9444 delete[] pRenderPassInfo->pSubpasses[i].pInputAttachments;
9445 } else if (pRenderPassInfo->pSubpasses[i].pColorAttachments) {
9446 delete[] pRenderPassInfo->pSubpasses[i].pColorAttachments;
9447 } else if (pRenderPassInfo->pSubpasses[i].pResolveAttachments) {
9448 delete[] pRenderPassInfo->pSubpasses[i].pResolveAttachments;
9449 } else if (pRenderPassInfo->pSubpasses[i].pPreserveAttachments) {
9450 delete[] pRenderPassInfo->pSubpasses[i].pPreserveAttachments;
9451 }
9452 }
9453 delete[] pRenderPassInfo->pSubpasses;
9454 }
9455 delete[] pRenderPassInfo->pDependencies;
9456 delete pRenderPassInfo;
9457 delete (*ii).second;
9458 }
9459 my_data->renderPassMap.clear();
9460 }
9461
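// VerifyFramebufferAndRenderPassLayouts: at vkCmdBeginRenderPass, check that the framebuffer
// and render pass agree on attachment count and that each attachment's tracked per-subresource
// layout matches the initialLayout the render pass declares. Subresources the command buffer
// has not seen yet are simply seeded with the initial layout rather than flagged.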
9462 VkBool32 VerifyFramebufferAndRenderPassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
9463 VkBool32 skip_call = VK_FALSE;
9464 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9465 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9466 const VkRenderPassCreateInfo *pRenderPassInfo = dev_data->renderPassMap[pRenderPassBegin->renderPass]->pCreateInfo;
9467 const VkFramebufferCreateInfo framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer].createInfo;
9468 if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
9469 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9470 DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
9471 "with a different number of attachments.");
9472 }
9473 for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
9474 const VkImageView &image_view = framebufferInfo.pAttachments[i];
9475 auto image_data = dev_data->imageViewMap.find(image_view);
9476 assert(image_data != dev_data->imageViewMap.end());
9477 const VkImage &image = image_data->second.image;
9478 const VkImageSubresourceRange &subRange = image_data->second.subresourceRange;
9479 IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
9480 pRenderPassInfo->pAttachments[i].initialLayout};
9481 // TODO: Do not iterate over every possibility - consolidate where possible
9482 for (uint32_t j = 0; j < subRange.levelCount; j++) {
9483 uint32_t level = subRange.baseMipLevel + j;
9484 for (uint32_t k = 0; k < subRange.layerCount; k++) {
9485 uint32_t layer = subRange.baseArrayLayer + k;
9486 VkImageSubresource sub = {subRange.aspectMask, level, layer};
9487 IMAGE_CMD_BUF_LAYOUT_NODE node;
9488 if (!FindLayout(pCB, image, sub, node)) {
9489 SetLayout(pCB, image, sub, newNode);
9490 continue;
9491 }
9492 if (newNode.layout != node.layout) {
9493 skip_call |=
9494 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9495 DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using attachment %i "
9496 "where the "
9497                             "initial layout differs from the starting layout.",
9498 i);
9499 }
9500 }
9501 }
9502 }
9503 return skip_call;
9504 }
9505
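// TransitionSubpassLayouts: record each attachment used by the given subpass in the layout
// that subpass declares. The implementation performs these transitions implicitly at subpass
// boundaries, so the layer only mirrors them in its own layout tracking.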
9506 void TransitionSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, const int subpass_index) {
9507 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9508 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9509 auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9510 if (render_pass_data == dev_data->renderPassMap.end()) {
9511 return;
9512 }
9513 const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
9514 auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
9515 if (framebuffer_data == dev_data->frameBufferMap.end()) {
9516 return;
9517 }
9518 const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo;
9519 const VkSubpassDescription &subpass = pRenderPassInfo->pSubpasses[subpass_index];
9520 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9521 const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pInputAttachments[j].attachment];
9522 SetLayout(dev_data, pCB, image_view, subpass.pInputAttachments[j].layout);
9523 }
9524 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9525 const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pColorAttachments[j].attachment];
9526 SetLayout(dev_data, pCB, image_view, subpass.pColorAttachments[j].layout);
9527 }
9528 if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
9529 const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pDepthStencilAttachment->attachment];
9530 SetLayout(dev_data, pCB, image_view, subpass.pDepthStencilAttachment->layout);
9531 }
9532 }
9533
9534 VkBool32 validatePrimaryCommandBuffer(const layer_data *my_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) {
9535 VkBool32 skip_call = VK_FALSE;
9536 if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
9537 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9538 DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.",
9539 cmd_name.c_str());
9540 }
9541 return skip_call;
9542 }
9543
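// TransitionFinalSubpassLayouts: mirror the implicit end-of-render-pass transitions by
// recording every framebuffer attachment in its declared finalLayout.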
9544 void TransitionFinalSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
9545 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9546 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9547 auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9548 if (render_pass_data == dev_data->renderPassMap.end()) {
9549 return;
9550 }
9551 const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
9552 auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
9553 if (framebuffer_data == dev_data->frameBufferMap.end()) {
9554 return;
9555 }
9556 const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo;
9557 for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
9558 const VkImageView &image_view = framebufferInfo.pAttachments[i];
9559 SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
9560 }
9561 }
9562
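// vkCmdBeginRenderPass: besides layout verification and command tracking, the memory-tracker
// block below queues deferred validation callbacks per attachment based on its loadOp:
// CLEAR marks the backing memory valid (and must not be combined with a read-only first
// layout), DONT_CARE marks it invalid, and LOAD -- or any attachment whose first use is a
// read -- requires the memory to already be valid when the command buffer is submitted.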
9563 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
9564 vkCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
9565 VkBool32 skipCall = VK_FALSE;
9566 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9567 loader_platform_thread_lock_mutex(&globalLock);
9568 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9569 if (pCB) {
9570 if (pRenderPassBegin && pRenderPassBegin->renderPass) {
9571 #if MTMERGESOURCE
9572 auto pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9573 if (pass_data != dev_data->renderPassMap.end()) {
9574 RENDER_PASS_NODE* pRPNode = pass_data->second;
9575 pRPNode->fb = pRenderPassBegin->framebuffer;
9576 auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
9577 for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
9578 MT_FB_ATTACHMENT_INFO &fb_info = dev_data->frameBufferMap[pRPNode->fb].attachments[i];
9579 if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
9580 if (cb_data != dev_data->commandBufferMap.end()) {
9581 std::function<VkBool32()> function = [=]() {
9582 set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
9583 return VK_FALSE;
9584 };
9585 cb_data->second->validate_functions.push_back(function);
9586 }
9587 VkImageLayout &attachment_layout = pRPNode->attachment_first_layout[pRPNode->attachments[i].attachment];
9588 if (attachment_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL ||
9589 attachment_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
9590 skipCall |=
9591 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9592 VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, (uint64_t)(pRenderPassBegin->renderPass), __LINE__,
9593 MEMTRACK_INVALID_LAYOUT, "MEM", "Cannot clear attachment %d with invalid first layout %d.",
9594 pRPNode->attachments[i].attachment, attachment_layout);
9595 }
9596 } else if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE) {
9597 if (cb_data != dev_data->commandBufferMap.end()) {
9598 std::function<VkBool32()> function = [=]() {
9599 set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
9600 return VK_FALSE;
9601 };
9602 cb_data->second->validate_functions.push_back(function);
9603 }
9604 } else if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_LOAD) {
9605 if (cb_data != dev_data->commandBufferMap.end()) {
9606 std::function<VkBool32()> function = [=]() {
9607 return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
9608 };
9609 cb_data->second->validate_functions.push_back(function);
9610 }
9611 }
9612 if (pRPNode->attachment_first_read[pRPNode->attachments[i].attachment]) {
9613 if (cb_data != dev_data->commandBufferMap.end()) {
9614 std::function<VkBool32()> function = [=]() {
9615 return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
9616 };
9617 cb_data->second->validate_functions.push_back(function);
9618 }
9619 }
9620 }
9621 }
9622 #endif
9623 skipCall |= VerifyFramebufferAndRenderPassLayouts(commandBuffer, pRenderPassBegin);
9624 auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9625 if (render_pass_data != dev_data->renderPassMap.end()) {
9626 skipCall |= ValidateDependencies(dev_data, pRenderPassBegin, render_pass_data->second->subpassToNode);
9627 }
9628 skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBeginRenderPass");
9629 skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdBeginRenderPass");
9630 skipCall |= addCmd(dev_data, pCB, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
9631 pCB->activeRenderPass = pRenderPassBegin->renderPass;
9632 // This is a shallow copy as that is all that is needed for now
9633 pCB->activeRenderPassBeginInfo = *pRenderPassBegin;
9634 pCB->activeSubpass = 0;
9635 pCB->activeSubpassContents = contents;
9636 pCB->framebuffer = pRenderPassBegin->framebuffer;
9637 // Connect this framebuffer to this cmdBuffer
9638 dev_data->frameBufferMap[pCB->framebuffer].referencingCmdBuffers.insert(pCB->commandBuffer);
9639 } else {
9640 skipCall |=
9641 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9642 DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()");
9643 }
9644 }
9645 loader_platform_thread_unlock_mutex(&globalLock);
9646 if (VK_FALSE == skipCall) {
9647 dev_data->device_dispatch_table->CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
9648 loader_platform_thread_lock_mutex(&globalLock);
9649 // This is a shallow copy as that is all that is needed for now
9650 dev_data->renderPassBeginInfo = *pRenderPassBegin;
9651 dev_data->currentSubpass = 0;
9652 loader_platform_thread_unlock_mutex(&globalLock);
9653 }
9654 }
9655
9656 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
9657 VkBool32 skipCall = VK_FALSE;
9658 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9659 loader_platform_thread_lock_mutex(&globalLock);
9660 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9661 TransitionSubpassLayouts(commandBuffer, &dev_data->renderPassBeginInfo, ++dev_data->currentSubpass);
9662 if (pCB) {
9663 skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
9664 skipCall |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
9665 pCB->activeSubpass++;
9666 pCB->activeSubpassContents = contents;
9667 TransitionSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
9668 if (pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline) {
9669 skipCall |= validatePipelineState(dev_data, pCB, VK_PIPELINE_BIND_POINT_GRAPHICS,
9670 pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
9671 }
9672 skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass");
9673 }
9674 loader_platform_thread_unlock_mutex(&globalLock);
9675 if (VK_FALSE == skipCall)
9676 dev_data->device_dispatch_table->CmdNextSubpass(commandBuffer, contents);
9677 }
9678
9679 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(VkCommandBuffer commandBuffer) {
9680 VkBool32 skipCall = VK_FALSE;
9681 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9682 loader_platform_thread_lock_mutex(&globalLock);
9683 #if MTMERGESOURCE
9684 auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
9685 if (cb_data != dev_data->commandBufferMap.end()) {
9686 auto pass_data = dev_data->renderPassMap.find(cb_data->second->activeRenderPass);
9687 if (pass_data != dev_data->renderPassMap.end()) {
9688 RENDER_PASS_NODE* pRPNode = pass_data->second;
9689 for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
9690 MT_FB_ATTACHMENT_INFO &fb_info = dev_data->frameBufferMap[pRPNode->fb].attachments[i];
9691 if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
9692 if (cb_data != dev_data->commandBufferMap.end()) {
9693 std::function<VkBool32()> function = [=]() {
9694 set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
9695 return VK_FALSE;
9696 };
9697 cb_data->second->validate_functions.push_back(function);
9698 }
9699 } else if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE) {
9700 if (cb_data != dev_data->commandBufferMap.end()) {
9701 std::function<VkBool32()> function = [=]() {
9702 set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
9703 return VK_FALSE;
9704 };
9705 cb_data->second->validate_functions.push_back(function);
9706 }
9707 }
9708 }
9709 }
9710 }
9711 #endif
9712 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9713 TransitionFinalSubpassLayouts(commandBuffer, &dev_data->renderPassBeginInfo);
9714 if (pCB) {
9715         skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass");
9716 skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass");
9717 skipCall |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
9718 TransitionFinalSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo);
9719 pCB->activeRenderPass = 0;
9720 pCB->activeSubpass = 0;
9721 }
9722 loader_platform_thread_unlock_mutex(&globalLock);
9723 if (VK_FALSE == skipCall)
9724 dev_data->device_dispatch_table->CmdEndRenderPass(commandBuffer);
9725 }
9726
9727 bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass,
9728 VkRenderPass primaryPass, uint32_t primaryAttach, uint32_t secondaryAttach, const char *msg) {
9729 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9730 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9731 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64
9732                    " that is not compatible with the current render pass %" PRIx64 ". "
9733                    "Attachment %" PRIu32 " is not compatible with %" PRIu32 ". %s",
9734 (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass), primaryAttach, secondaryAttach,
9735 msg);
9736 }
9737
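// validateAttachmentCompatibility: two attachment references are compatible for
// vkCmdExecuteCommands purposes if both are VK_ATTACHMENT_UNUSED, or if both are used and
// agree on format and sample count (plus, for multi-subpass render passes, attachment flags).
// Indices beyond a pass's attachmentCount are folded to VK_ATTACHMENT_UNUSED first.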
9738 bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9739 uint32_t primaryAttach, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass,
9740 uint32_t secondaryAttach, bool is_multi) {
9741 bool skip_call = false;
9742 auto primary_data = dev_data->renderPassMap.find(primaryPass);
9743 auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
9744 if (primary_data->second->pCreateInfo->attachmentCount <= primaryAttach) {
9745 primaryAttach = VK_ATTACHMENT_UNUSED;
9746 }
9747 if (secondary_data->second->pCreateInfo->attachmentCount <= secondaryAttach) {
9748 secondaryAttach = VK_ATTACHMENT_UNUSED;
9749 }
9750 if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
9751 return skip_call;
9752 }
9753 if (primaryAttach == VK_ATTACHMENT_UNUSED) {
9754 skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9755 secondaryAttach, "The first is unused while the second is not.");
9756 return skip_call;
9757 }
9758 if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
9759 skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9760 secondaryAttach, "The second is unused while the first is not.");
9761 return skip_call;
9762 }
9763 if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].format !=
9764 secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].format) {
9765 skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9766 secondaryAttach, "They have different formats.");
9767 }
9768 if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].samples !=
9769 secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].samples) {
9770 skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9771 secondaryAttach, "They have different samples.");
9772 }
9773 if (is_multi &&
9774 primary_data->second->pCreateInfo->pAttachments[primaryAttach].flags !=
9775 secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].flags) {
9776 skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9777 secondaryAttach, "They have different flags.");
9778 }
9779 return skip_call;
9780 }
9781
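// validateSubpassCompatibility: compare one subpass of the primary and secondary render
// passes element by element, padding the shorter input, color, and resolve arrays with
// VK_ATTACHMENT_UNUSED so mismatched counts reduce to per-attachment checks, then compare
// the depth/stencil references the same way.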
9782 bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9783 VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass, const int subpass, bool is_multi) {
9784 bool skip_call = false;
9785 auto primary_data = dev_data->renderPassMap.find(primaryPass);
9786 auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
9787 const VkSubpassDescription &primary_desc = primary_data->second->pCreateInfo->pSubpasses[subpass];
9788 const VkSubpassDescription &secondary_desc = secondary_data->second->pCreateInfo->pSubpasses[subpass];
9789 uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
9790 for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
9791 uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
9792 if (i < primary_desc.inputAttachmentCount) {
9793 primary_input_attach = primary_desc.pInputAttachments[i].attachment;
9794 }
9795 if (i < secondary_desc.inputAttachmentCount) {
9796 secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
9797 }
9798 skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_input_attach, secondaryBuffer,
9799 secondaryPass, secondary_input_attach, is_multi);
9800 }
9801 uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
9802 for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
9803 uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
9804 if (i < primary_desc.colorAttachmentCount) {
9805 primary_color_attach = primary_desc.pColorAttachments[i].attachment;
9806 }
9807 if (i < secondary_desc.colorAttachmentCount) {
9808 secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
9809 }
9810 skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_color_attach, secondaryBuffer,
9811 secondaryPass, secondary_color_attach, is_multi);
9812 uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
9813 if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
9814 primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
9815 }
9816 if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
9817 secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
9818 }
9819 skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_resolve_attach, secondaryBuffer,
9820 secondaryPass, secondary_resolve_attach, is_multi);
9821 }
9822 uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
9823 if (primary_desc.pDepthStencilAttachment) {
9824 primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
9825 }
9826 if (secondary_desc.pDepthStencilAttachment) {
9827 secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
9828 }
9829 skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_depthstencil_attach, secondaryBuffer,
9830 secondaryPass, secondary_depthstencil_attach, is_multi);
9831 return skip_call;
9832 }
9833
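// Verify that the render passes used by the primary and secondary command buffers are compatible: both handles must
// be known to the layer, have the same subpass count, and have pairwise-compatible subpasses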
bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
                                     VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass) {
    bool skip_call = false;
    // Early exit if renderPass objects are identical (and therefore compatible)
    if (primaryPass == secondaryPass)
        return skip_call;
    auto primary_data = dev_data->renderPassMap.find(primaryPass);
    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
    if (primary_data == dev_data->renderPassMap.end() || primary_data->second == nullptr) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                    "vkCmdExecuteCommands() called w/ invalid current Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
                    (void *)primaryBuffer, (uint64_t)(primaryPass));
        return skip_call;
    }
    if (secondary_data == dev_data->renderPassMap.end() || secondary_data->second == nullptr) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                    "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
                    (void *)secondaryBuffer, (uint64_t)(secondaryPass));
        return skip_call;
    }
    if (primary_data->second->pCreateInfo->subpassCount != secondary_data->second->pCreateInfo->subpassCount) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64
                             " that is not compatible with the current render pass %" PRIx64 ". "
                             "They have a different number of subpasses.",
                             (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass));
        return skip_call;
    }
    bool is_multi = primary_data->second->pCreateInfo->subpassCount > 1;
    for (uint32_t i = 0; i < primary_data->second->pCreateInfo->subpassCount; ++i) {
        skip_call |=
            validateSubpassCompatibility(dev_data, primaryBuffer, primaryPass, secondaryBuffer, secondaryPass, i, is_multi);
    }
    return skip_call;
}

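// Verify that the framebuffer inherited by a secondary command buffer matches the primary command buffer's current
// framebuffer, and that the framebuffer's render pass is compatible with the secondary's inherited render pass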
bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
                         VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
    bool skip_call = false;
    if (!pSubCB->beginInfo.pInheritanceInfo) {
        return skip_call;
    }
    VkFramebuffer primary_fb = pCB->framebuffer;
    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
    if (secondary_fb != VK_NULL_HANDLE) {
        if (primary_fb != secondary_fb) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                                 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a framebuffer %" PRIx64
                                 " that is not compatible with the current framebuffer %" PRIx64 ".",
                                 (void *)secondaryBuffer, (uint64_t)(secondary_fb), (uint64_t)(primary_fb));
        }
        auto fb_data = dev_data->frameBufferMap.find(secondary_fb);
        if (fb_data == dev_data->frameBufferMap.end()) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has invalid framebuffer %" PRIx64 ".",
                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
            return skip_call;
        }
        skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb_data->second.createInfo.renderPass,
                                                     secondaryBuffer, pSubCB->beginInfo.pInheritanceInfo->renderPass);
    }
    return skip_call;
}

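// Check that a secondary command buffer does not conflict with queries active on the primary command buffer:
// inherited pipeline statistics must cover all statistics bits of any active PIPELINE_STATISTICS query pool, and the
// secondary must not have started a query of a type that is already active on the primary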
bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
    bool skipCall = false;
    unordered_set<int> activeTypes;
    for (auto queryObject : pCB->activeQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end()) {
            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
                pSubCB->beginInfo.pInheritanceInfo) {
                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
                        "which has invalid active query pool %" PRIx64 ". Pipeline statistics are being queried, so the "
                        "secondary command buffer must inherit all statistics bits set on the queryPool.",
                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first));
                }
            }
            activeTypes.insert(queryPoolData->second.createInfo.queryType);
        }
    }
    for (auto queryObject : pSubCB->startedQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
                        "which has invalid active query pool %" PRIx64 " of type %d, but a query of that type has been started on "
                        "secondary Cmd Buffer %p.",
                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
                        queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
        }
    }
    return skipCall;
}

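// Validate and record execution of secondary command buffers: each element of pCommandBuffers must be a known
// secondary command buffer whose inherited render pass, framebuffer, usage flags, and query state are compatible
// with the calling primary command buffer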
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        GLOBAL_CB_NODE *pSubCB = NULL;
        for (uint32_t i = 0; i < commandBuffersCount; i++) {
            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
            if (!pSubCB) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p in element %u of pCommandBuffers array.",
                            (void *)pCommandBuffers[i], i);
            } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                                    "vkCmdExecuteCommands() called w/ Primary Cmd Buffer %p in element %u of pCommandBuffers "
                                    "array. All cmd buffers in pCommandBuffers array must be secondary.",
                                    (void *)pCommandBuffers[i], i);
            } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (%p) executed within render pass (%#" PRIxLEAST64
                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
                        (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass);
                } else {
                    // Make sure render pass is compatible with parent command buffer pass if has continue
                    skipCall |= validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass,
                                                                pCommandBuffers[i],
                                                                pSubCB->beginInfo.pInheritanceInfo->renderPass);
                    skipCall |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
                }
                string errorString = "";
                if (!verify_renderpass_compatibility(dev_data, pCB->activeRenderPass,
                                                     pSubCB->beginInfo.pInheritanceInfo->renderPass, errorString)) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (%p) w/ render pass (%#" PRIxLEAST64
                        ") is incompatible w/ primary command buffer (%p) w/ render pass (%#" PRIxLEAST64 ") due to: %s",
                        (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass,
                        (void *)commandBuffer, (uint64_t)pCB->activeRenderPass, errorString.c_str());
                }
                // If framebuffer for secondary CB is not NULL, then it must match FB from vkCmdBeginRenderPass()
                // that this CB will be executed in AND framebuffer must have been created w/ RP compatible w/ renderpass
                if (pSubCB->beginInfo.pInheritanceInfo->framebuffer) {
                    if (pSubCB->beginInfo.pInheritanceInfo->framebuffer != pCB->activeRenderPassBeginInfo.framebuffer) {
                        skipCall |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
                            "vkCmdExecuteCommands(): Secondary Command Buffer (%p) references framebuffer (%#" PRIxLEAST64
                            ") that does not match framebuffer (%#" PRIxLEAST64 ") in active renderpass (%#" PRIxLEAST64 ").",
                            (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->framebuffer,
                            (uint64_t)pCB->activeRenderPassBeginInfo.framebuffer, (uint64_t)pCB->activeRenderPass);
                    }
                }
            }
            // TODO(mlentine): Move more logic into this method
            skipCall |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
            skipCall |= validateCommandBufferState(dev_data, pSubCB);
            // Secondary cmdBuffers are considered pending execution starting w/ being recorded
            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                        "Attempt to simultaneously execute CB %#" PRIxLEAST64 " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
                        "set!",
                        (uint64_t)(pCB->commandBuffer));
                }
                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
                    // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (%#" PRIxLEAST64
                        ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
                        "(%#" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
                        "set, even though it does.",
                        (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
                }
            }
            if (!pCB->activeQueries.empty() && !dev_data->physDevProperties.features.inheritedQueries) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "vkCmdExecuteCommands(): Secondary Command Buffer (%#" PRIxLEAST64
                            ") cannot be submitted with a query in flight when inherited queries are not supported on this "
                            "device.",
                            reinterpret_cast<uint64_t>(pCommandBuffers[i]));
            }
            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
        }
        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands");
        skipCall |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
}

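// Verify that any image bound to the given memory object is in a mappable layout: only GENERAL and PREINITIALIZED
// layouts may be mapped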
VkBool32 ValidateMapImageLayouts(VkDevice device, VkDeviceMemory mem) {
    VkBool32 skip_call = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    auto mem_data = dev_data->memObjMap.find(mem);
    if ((mem_data != dev_data->memObjMap.end()) && (mem_data->second.image != VK_NULL_HANDLE)) {
        std::vector<VkImageLayout> layouts;
        if (FindLayouts(dev_data, mem_data->second.image, layouts)) {
            for (auto layout : layouts) {
                if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                         "Cannot map an image with layout %s. Only GENERAL or PREINITIALIZED are supported.",
                                         string_VkImageLayout(layout));
                }
            }
        }
    }
    return skip_call;
}

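// Validate vkMapMemory: the memory must be host-visible, the requested range must be valid and recorded for later
// flush checks, and any bound image must be in a mappable layout before the call is passed down to the driver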
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkBool32 skip_call = VK_FALSE;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
    if (pMemObj) {
        pMemObj->valid = true;
        if ((memProps.memoryTypes[pMemObj->allocInfo.memoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
            skip_call =
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM",
                        "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj %#" PRIxLEAST64, (uint64_t)mem);
        }
    }
    skip_call |= validateMemRange(dev_data, mem, offset, size);
    storeMemRanges(dev_data, mem, offset, size);
#endif
    skip_call |= ValidateMapImageLayouts(device, mem);
    loader_platform_thread_unlock_mutex(&globalLock);

    if (VK_FALSE == skip_call) {
        result = dev_data->device_dispatch_table->MapMemory(device, mem, offset, size, flags, ppData);
#if MTMERGESOURCE
        initializeAndTrackMemory(dev_data, mem, size, ppData);
#endif
    }
    return result;
}

#if MTMERGESOURCE
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkUnmapMemory(VkDevice device, VkDeviceMemory mem) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkBool32 skipCall = VK_FALSE;

    loader_platform_thread_lock_mutex(&globalLock);
    skipCall |= deleteMemRanges(my_data, mem);
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall) {
        my_data->device_dispatch_table->UnmapMemory(device, mem);
    }
}

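// Verify that each given flush/invalidate range falls within a currently mapped range of its memory object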
VkBool32 validateMemoryIsMapped(layer_data *my_data, const char *funcName, uint32_t memRangeCount,
                                const VkMappedMemoryRange *pMemRanges) {
    VkBool32 skipCall = VK_FALSE;
    for (uint32_t i = 0; i < memRangeCount; ++i) {
        auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
        if (mem_element != my_data->memObjMap.end()) {
            if (mem_element->second.memRange.offset > pMemRanges[i].offset) {
                skipCall |= log_msg(
                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                    (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                    "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
                    "(" PRINTF_SIZE_T_SPECIFIER ").",
                    funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_element->second.memRange.offset));
            }
            if ((mem_element->second.memRange.size != VK_WHOLE_SIZE) &&
                ((mem_element->second.memRange.offset + mem_element->second.memRange.size) <
                 (pMemRanges[i].offset + pMemRanges[i].size))) {
                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
                                    MEMTRACK_INVALID_MAP, "MEM", "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER
                                    ") exceeds the Memory Object's upper-bound "
                                    "(" PRINTF_SIZE_T_SPECIFIER ").",
                                    funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
                                    static_cast<size_t>(mem_element->second.memRange.offset + mem_element->second.memRange.size));
            }
        }
    }
    return skipCall;
}

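// For non-coherent memory the layer shadows each mapped range, surrounding the app's data with guard bytes written
// as NoncoherentMemoryFillValue. Before flushing, verify the guard regions are intact (detecting under/overruns of
// the mapped range) and copy the shadowed data back to the driver's actual mapping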
VkBool32 validateAndCopyNoncoherentMemoryToDriver(layer_data *my_data, uint32_t memRangeCount,
                                                  const VkMappedMemoryRange *pMemRanges) {
    VkBool32 skipCall = VK_FALSE;
    for (uint32_t i = 0; i < memRangeCount; ++i) {
        auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
        if (mem_element != my_data->memObjMap.end()) {
            if (mem_element->second.pData) {
                VkDeviceSize size = mem_element->second.memRange.size;
                VkDeviceSize half_size = (size / 2);
                char *data = static_cast<char *>(mem_element->second.pData);
                // Check the guard region preceding the app's data
                for (VkDeviceSize j = 0; j < half_size; ++j) {
                    if (data[j] != NoncoherentMemoryFillValue) {
                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory,
                                            __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                                            "Memory overflow was detected on mem obj %" PRIxLEAST64,
                                            (uint64_t)pMemRanges[i].memory);
                    }
                }
                // Check the guard region following the app's data
                for (VkDeviceSize j = size + half_size; j < 2 * size; ++j) {
                    if (data[j] != NoncoherentMemoryFillValue) {
                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory,
                                            __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                                            "Memory overflow was detected on mem obj %" PRIxLEAST64,
                                            (uint64_t)pMemRanges[i].memory);
                    }
                }
                memcpy(mem_element->second.pDriverData, static_cast<void *>(data + (size_t)(half_size)), (size_t)(size));
            }
        }
    }
    return skipCall;
}

VK_LAYER_EXPORT VkResult VKAPI_CALL
vkFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    VkBool32 skipCall = VK_FALSE;
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    loader_platform_thread_lock_mutex(&globalLock);
    skipCall |= validateAndCopyNoncoherentMemoryToDriver(my_data, memRangeCount, pMemRanges);
    skipCall |= validateMemoryIsMapped(my_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall) {
        result = my_data->device_dispatch_table->FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
    }
    return result;
}

VK_LAYER_EXPORT VkResult VKAPI_CALL
vkInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    VkBool32 skipCall = VK_FALSE;
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    loader_platform_thread_lock_mutex(&globalLock);
    skipCall |= validateMemoryIsMapped(my_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall) {
        result = my_data->device_dispatch_table->InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
    }
    return result;
}
#endif

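// Track vkBindImageMemory: record the image-to-memory binding, check for aliasing against other buffers/images bound
// to the same allocation, and cache the image's memory requirements for later validation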
VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    VkBool32 skipCall = VK_FALSE;
#if MTMERGESOURCE
    loader_platform_thread_lock_mutex(&globalLock);
    // Track objects tied to memory
    uint64_t image_handle = (uint64_t)(image);
    skipCall =
        set_mem_binding(dev_data, device, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
    add_object_binding_info(dev_data, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, mem);
    {
        VkMemoryRequirements memRequirements;
        vkGetImageMemoryRequirements(device, image, &memRequirements);
        skipCall |= validate_buffer_image_aliasing(dev_data, image_handle, mem, memoryOffset, memRequirements,
                                                   dev_data->memObjMap[mem].imageRanges, dev_data->memObjMap[mem].bufferRanges,
                                                   VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
    }
    print_mem_list(dev_data, device);
    loader_platform_thread_unlock_mutex(&globalLock);
#endif
    if (VK_FALSE == skipCall) {
        result = dev_data->device_dispatch_table->BindImageMemory(device, image, mem, memoryOffset);
        VkMemoryRequirements memRequirements;
        dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements);
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->memObjMap[mem].image = image;
        dev_data->imageMap[image].mem = mem;
        dev_data->imageMap[image].memOffset = memoryOffset;
        dev_data->imageMap[image].memSize = memRequirements.size;
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent(VkDevice device, VkEvent event) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    dev_data->eventMap[event].needsSignaled = false;
    dev_data->eventMap[event].stageMask = VK_PIPELINE_STAGE_HOST_BIT;
    loader_platform_thread_unlock_mutex(&globalLock);
    VkResult result = dev_data->device_dispatch_table->SetEvent(device, event);
    return result;
}

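// Validate vkQueueBindSparse: track the memory bound by each buffer, opaque-image, and image bind, and verify the
// signal/wait state of the semaphores referenced by each VkBindSparseInfo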
VKAPI_ATTR VkResult VKAPI_CALL
vkQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    VkBool32 skip_call = VK_FALSE;
#if MTMERGESOURCE
    // MTMTODO : Merge this code with the checks below
    loader_platform_thread_lock_mutex(&globalLock);

    for (uint32_t i = 0; i < bindInfoCount; i++) {
        const VkBindSparseInfo *bindInfo = &pBindInfo[i];
        // Track objects tied to memory
        for (uint32_t j = 0; j < bindInfo->bufferBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo->pBufferBinds[j].bindCount; k++) {
                if (set_sparse_mem_binding(dev_data, queue, bindInfo->pBufferBinds[j].pBinds[k].memory,
                                           (uint64_t)bindInfo->pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                           "vkQueueBindSparse"))
                    skip_call = VK_TRUE;
            }
        }
        for (uint32_t j = 0; j < bindInfo->imageOpaqueBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo->pImageOpaqueBinds[j].bindCount; k++) {
                if (set_sparse_mem_binding(dev_data, queue, bindInfo->pImageOpaqueBinds[j].pBinds[k].memory,
                                           (uint64_t)bindInfo->pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                           "vkQueueBindSparse"))
                    skip_call = VK_TRUE;
            }
        }
        for (uint32_t j = 0; j < bindInfo->imageBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo->pImageBinds[j].bindCount; k++) {
                if (set_sparse_mem_binding(dev_data, queue, bindInfo->pImageBinds[j].pBinds[k].memory,
                                           (uint64_t)bindInfo->pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                           "vkQueueBindSparse"))
                    skip_call = VK_TRUE;
            }
        }
        // Validate semaphore state
        for (uint32_t j = 0; j < bindInfo->waitSemaphoreCount; j++) {
            VkSemaphore sem = bindInfo->pWaitSemaphores[j];

            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_SIGNALLED) {
                    skip_call =
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
                                "vkQueueBindSparse: Semaphore must be in signaled state before passing to pWaitSemaphores");
                }
                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_WAIT;
            }
        }
        for (uint32_t j = 0; j < bindInfo->signalSemaphoreCount; j++) {
            VkSemaphore sem = bindInfo->pSignalSemaphores[j];

            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_UNSET) {
                    skip_call =
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
                                "vkQueueBindSparse: Semaphore must not be currently signaled or in a wait state");
                }
                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
            }
        }
    }

    print_mem_list(dev_data, queue);
    loader_platform_thread_unlock_mutex(&globalLock);
#endif
    loader_platform_thread_lock_mutex(&globalLock);
    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
            if (dev_data->semaphoreMap[bindInfo.pWaitSemaphores[i]].signaled) {
                dev_data->semaphoreMap[bindInfo.pWaitSemaphores[i]].signaled = 0;
            } else {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                            "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
                            (uint64_t)(queue), (uint64_t)(bindInfo.pWaitSemaphores[i]));
            }
        }
        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
            dev_data->semaphoreMap[bindInfo.pSignalSemaphores[i]].signaled = 1;
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);

    if (VK_FALSE == skip_call)
        return dev_data->device_dispatch_table->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
#if MTMERGESOURCE
    // Update semaphore state
    loader_platform_thread_lock_mutex(&globalLock);
    for (uint32_t bind_info_idx = 0; bind_info_idx < bindInfoCount; bind_info_idx++) {
        const VkBindSparseInfo *bindInfo = &pBindInfo[bind_info_idx];
        for (uint32_t i = 0; i < bindInfo->waitSemaphoreCount; i++) {
            VkSemaphore sem = bindInfo->pWaitSemaphores[i];

            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_UNSET;
            }
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
#endif

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
    if (result == VK_SUCCESS) {
        loader_platform_thread_lock_mutex(&globalLock);
        SEMAPHORE_NODE *sNode = &dev_data->semaphoreMap[*pSemaphore];
        sNode->signaled = 0;
        sNode->queue = VK_NULL_HANDLE;
        sNode->in_use.store(0);
        sNode->state = MEMTRACK_SEMAPHORE_STATE_UNSET;
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
vkCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateEvent(device, pCreateInfo, pAllocator, pEvent);
    if (result == VK_SUCCESS) {
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->eventMap[*pEvent].needsSignaled = false;
        dev_data->eventMap[*pEvent].in_use.store(0);
        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

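// On successful swapchain creation, allocate a SWAPCHAIN_NODE to track the swapchain and, later, its images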
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
                                                                    const VkAllocationCallbacks *pAllocator,
                                                                    VkSwapchainKHR *pSwapchain) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);

    if (VK_SUCCESS == result) {
        SWAPCHAIN_NODE *psc_node = new SWAPCHAIN_NODE(pCreateInfo);
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->device_extensions.swapchainMap[*pSwapchain] = psc_node;
        loader_platform_thread_unlock_mutex(&globalLock);
    }

    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skipCall = false;

    loader_platform_thread_lock_mutex(&globalLock);
    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(swapchain);
    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
        if (swapchain_data->second->images.size() > 0) {
            for (auto swapchain_image : swapchain_data->second->images) {
                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
                if (image_sub != dev_data->imageSubresourceMap.end()) {
                    for (auto imgsubpair : image_sub->second) {
                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
                        if (image_item != dev_data->imageLayoutMap.end()) {
                            dev_data->imageLayoutMap.erase(image_item);
                        }
                    }
                    dev_data->imageSubresourceMap.erase(image_sub);
                }
#if MTMERGESOURCE
                skipCall = clear_object_binding(dev_data, device, (uint64_t)swapchain_image,
                                                VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
                dev_data->imageBindingMap.erase((uint64_t)swapchain_image);
#endif
            }
        }
        delete swapchain_data->second;
        dev_data->device_extensions.swapchainMap.erase(swapchain);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->DestroySwapchainKHR(device, swapchain, pAllocator);
}

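// After the driver returns the swapchain images, record per-image layout and subresource tracking state and map each
// image back to its swapchain so later validation can recognize WSI images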
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);

    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
        // This should never happen and is checked by param checker.
        if (!pCount)
            return result;
        loader_platform_thread_lock_mutex(&globalLock);
        const size_t count = *pCount;
        auto swapchain_node = dev_data->device_extensions.swapchainMap[swapchain];
        if (!swapchain_node->images.empty()) {
            // TODO : Not sure I like the memcmp here, but it works
            const bool mismatch = (swapchain_node->images.size() != count ||
                                   memcmp(&swapchain_node->images[0], pSwapchainImages,
                                          sizeof(swapchain_node->images[0]) * count));
            if (mismatch) {
                // TODO: Verify against Valid Usage section of extension
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
                        "vkGetSwapchainInfoKHR(%" PRIu64
                        ", VK_SWAP_CHAIN_INFO_TYPE_PERSISTENT_IMAGES_KHR) returned mismatching data",
                        (uint64_t)(swapchain));
            }
        }
        for (uint32_t i = 0; i < *pCount; ++i) {
            IMAGE_LAYOUT_NODE image_layout_node;
            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
            image_layout_node.format = swapchain_node->createInfo.imageFormat;
            dev_data->imageMap[pSwapchainImages[i]].createInfo.mipLevels = 1;
            dev_data->imageMap[pSwapchainImages[i]].createInfo.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
            swapchain_node->images.push_back(pSwapchainImages[i]);
            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
            dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
            dev_data->imageLayoutMap[subpair] = image_layout_node;
            dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
        }
        if (!swapchain_node->images.empty()) {
            for (auto image : swapchain_node->images) {
                // Add image object binding, then insert the new Mem Object and then bind it to created image
#if MTMERGESOURCE
                add_object_create_info(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                       &swapchain_node->createInfo);
#endif
            }
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

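// Validate vkQueuePresentKHR: each wait semaphore must be pending signal, and every presented image must have valid
// backing memory and be in VK_IMAGE_LAYOUT_PRESENT_SRC_KHR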
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skip_call = false;

    if (pPresentInfo) {
        loader_platform_thread_lock_mutex(&globalLock);
        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
            if (dev_data->semaphoreMap[pPresentInfo->pWaitSemaphores[i]].signaled) {
                dev_data->semaphoreMap[pPresentInfo->pWaitSemaphores[i]].signaled = 0;
            } else {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                            "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
                            (uint64_t)(queue), (uint64_t)(pPresentInfo->pWaitSemaphores[i]));
            }
        }
        VkDeviceMemory mem;
        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
            auto swapchain_data = dev_data->device_extensions.swapchainMap.find(pPresentInfo->pSwapchains[i]);
            if (swapchain_data != dev_data->device_extensions.swapchainMap.end() &&
                pPresentInfo->pImageIndices[i] < swapchain_data->second->images.size()) {
                VkImage image = swapchain_data->second->images[pPresentInfo->pImageIndices[i]];
#if MTMERGESOURCE
                skip_call |=
                    get_mem_binding_from_object(dev_data, queue, (uint64_t)(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
                skip_call |= validate_memory_is_valid(dev_data, mem, "vkQueuePresentKHR()", image);
#endif
                vector<VkImageLayout> layouts;
                if (FindLayouts(dev_data, image, layouts)) {
                    for (auto layout : layouts) {
                        if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
                            skip_call |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, reinterpret_cast<uint64_t &>(queue), __LINE__,
                                        DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                        "Images passed to present must be in layout "
                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, but this image is in %s.",
                                        string_VkImageLayout(layout));
                        }
                    }
                }
            }
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }

    if (!skip_call)
        result = dev_data->device_dispatch_table->QueuePresentKHR(queue, pPresentInfo);
#if MTMERGESOURCE
    if (pPresentInfo) { // Guard against a NULL pPresentInfo, mirroring the check above
        loader_platform_thread_lock_mutex(&globalLock);
        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; i++) {
            VkSemaphore sem = pPresentInfo->pWaitSemaphores[i];
            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_UNSET;
            }
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }
#endif
    return result;
}

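// Validate vkAcquireNextImageKHR semaphore state, associate the fence with the swapchain, and mark the semaphore as
// signaled once the acquire has been issued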
VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
                                                     VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skipCall = false;
#if MTMERGESOURCE
    loader_platform_thread_lock_mutex(&globalLock);
    if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
        if (dev_data->semaphoreMap[semaphore].state != MEMTRACK_SEMAPHORE_STATE_UNSET) {
            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                               (uint64_t)semaphore, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
                               "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
        }
        dev_data->semaphoreMap[semaphore].state = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
    }
    auto fence_data = dev_data->fenceMap.find(fence);
    if (fence_data != dev_data->fenceMap.end()) {
        fence_data->second.swapchain = swapchain;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
#endif
    if (!skipCall) {
        result =
            dev_data->device_dispatch_table->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
    }
    loader_platform_thread_lock_mutex(&globalLock);
    // FIXME/TODO: Need to add tracking code for the "fence" parameter
    dev_data->semaphoreMap[semaphore].signaled = 1;
    loader_platform_thread_unlock_mutex(&globalLock);
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    VkResult res = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
    if (VK_SUCCESS == res) {
        loader_platform_thread_lock_mutex(&globalLock);
        res = layer_create_msg_callback(my_data->report_data, pCreateInfo, pAllocator, pMsgCallback);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return res;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkInstance instance,
                                                                           VkDebugReportCallbackEXT msgCallback,
                                                                           const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    pTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
    loader_platform_thread_lock_mutex(&globalLock);
    layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator);
    loader_platform_thread_unlock_mutex(&globalLock);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
                                                            pMsg);
}

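// Device-level entry point dispatch: return this layer's intercept for each core function it hooks, the WSI entry
// points when the swapchain extension is enabled, and otherwise forward the query down the dispatch chain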
vkGetDeviceProcAddr(VkDevice dev,const char * funcName)10628 VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
10629 if (!strcmp(funcName, "vkGetDeviceProcAddr"))
10630 return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
10631 if (!strcmp(funcName, "vkDestroyDevice"))
10632 return (PFN_vkVoidFunction)vkDestroyDevice;
10633 if (!strcmp(funcName, "vkQueueSubmit"))
10634 return (PFN_vkVoidFunction)vkQueueSubmit;
10635 if (!strcmp(funcName, "vkWaitForFences"))
10636 return (PFN_vkVoidFunction)vkWaitForFences;
10637 if (!strcmp(funcName, "vkGetFenceStatus"))
10638 return (PFN_vkVoidFunction)vkGetFenceStatus;
10639 if (!strcmp(funcName, "vkQueueWaitIdle"))
10640 return (PFN_vkVoidFunction)vkQueueWaitIdle;
10641 if (!strcmp(funcName, "vkDeviceWaitIdle"))
10642 return (PFN_vkVoidFunction)vkDeviceWaitIdle;
10643 if (!strcmp(funcName, "vkGetDeviceQueue"))
10644 return (PFN_vkVoidFunction)vkGetDeviceQueue;
10645 if (!strcmp(funcName, "vkDestroyInstance"))
10646 return (PFN_vkVoidFunction)vkDestroyInstance;
10647 if (!strcmp(funcName, "vkDestroyDevice"))
10648 return (PFN_vkVoidFunction)vkDestroyDevice;
10649 if (!strcmp(funcName, "vkDestroyFence"))
10650 return (PFN_vkVoidFunction)vkDestroyFence;
10651 if (!strcmp(funcName, "vkResetFences"))
10652 return (PFN_vkVoidFunction)vkResetFences;
10653 if (!strcmp(funcName, "vkDestroySemaphore"))
10654 return (PFN_vkVoidFunction)vkDestroySemaphore;
10655 if (!strcmp(funcName, "vkDestroyEvent"))
10656 return (PFN_vkVoidFunction)vkDestroyEvent;
10657 if (!strcmp(funcName, "vkDestroyQueryPool"))
10658 return (PFN_vkVoidFunction)vkDestroyQueryPool;
10659 if (!strcmp(funcName, "vkDestroyBuffer"))
10660 return (PFN_vkVoidFunction)vkDestroyBuffer;
10661 if (!strcmp(funcName, "vkDestroyBufferView"))
10662 return (PFN_vkVoidFunction)vkDestroyBufferView;
10663 if (!strcmp(funcName, "vkDestroyImage"))
10664 return (PFN_vkVoidFunction)vkDestroyImage;
10665 if (!strcmp(funcName, "vkDestroyImageView"))
10666 return (PFN_vkVoidFunction)vkDestroyImageView;
10667 if (!strcmp(funcName, "vkDestroyShaderModule"))
10668 return (PFN_vkVoidFunction)vkDestroyShaderModule;
10669 if (!strcmp(funcName, "vkDestroyPipeline"))
10670 return (PFN_vkVoidFunction)vkDestroyPipeline;
10671 if (!strcmp(funcName, "vkDestroyPipelineLayout"))
10672 return (PFN_vkVoidFunction)vkDestroyPipelineLayout;
10673 if (!strcmp(funcName, "vkDestroySampler"))
10674 return (PFN_vkVoidFunction)vkDestroySampler;
10675 if (!strcmp(funcName, "vkDestroyDescriptorSetLayout"))
10676 return (PFN_vkVoidFunction)vkDestroyDescriptorSetLayout;
10677 if (!strcmp(funcName, "vkDestroyDescriptorPool"))
10678 return (PFN_vkVoidFunction)vkDestroyDescriptorPool;
10679 if (!strcmp(funcName, "vkDestroyFramebuffer"))
10680 return (PFN_vkVoidFunction)vkDestroyFramebuffer;
10681 if (!strcmp(funcName, "vkDestroyRenderPass"))
10682 return (PFN_vkVoidFunction)vkDestroyRenderPass;
10683 if (!strcmp(funcName, "vkCreateBuffer"))
10684 return (PFN_vkVoidFunction)vkCreateBuffer;
10685 if (!strcmp(funcName, "vkCreateBufferView"))
10686 return (PFN_vkVoidFunction)vkCreateBufferView;
10687 if (!strcmp(funcName, "vkCreateImage"))
10688 return (PFN_vkVoidFunction)vkCreateImage;
10689 if (!strcmp(funcName, "vkCreateImageView"))
10690 return (PFN_vkVoidFunction)vkCreateImageView;
10691 if (!strcmp(funcName, "vkCreateFence"))
10692 return (PFN_vkVoidFunction)vkCreateFence;
10693 if (!strcmp(funcName, "CreatePipelineCache"))
10694 return (PFN_vkVoidFunction)vkCreatePipelineCache;
10695 if (!strcmp(funcName, "DestroyPipelineCache"))
10696 return (PFN_vkVoidFunction)vkDestroyPipelineCache;
10697 if (!strcmp(funcName, "GetPipelineCacheData"))
10698 return (PFN_vkVoidFunction)vkGetPipelineCacheData;
10699 if (!strcmp(funcName, "MergePipelineCaches"))
10700 return (PFN_vkVoidFunction)vkMergePipelineCaches;
10701 if (!strcmp(funcName, "vkCreateGraphicsPipelines"))
10702 return (PFN_vkVoidFunction)vkCreateGraphicsPipelines;
10703 if (!strcmp(funcName, "vkCreateComputePipelines"))
10704 return (PFN_vkVoidFunction)vkCreateComputePipelines;
10705 if (!strcmp(funcName, "vkCreateSampler"))
10706 return (PFN_vkVoidFunction)vkCreateSampler;
10707 if (!strcmp(funcName, "vkCreateDescriptorSetLayout"))
10708 return (PFN_vkVoidFunction)vkCreateDescriptorSetLayout;
10709 if (!strcmp(funcName, "vkCreatePipelineLayout"))
10710 return (PFN_vkVoidFunction)vkCreatePipelineLayout;
10711 if (!strcmp(funcName, "vkCreateDescriptorPool"))
10712 return (PFN_vkVoidFunction)vkCreateDescriptorPool;
10713 if (!strcmp(funcName, "vkResetDescriptorPool"))
10714 return (PFN_vkVoidFunction)vkResetDescriptorPool;
10715 if (!strcmp(funcName, "vkAllocateDescriptorSets"))
10716 return (PFN_vkVoidFunction)vkAllocateDescriptorSets;
10717 if (!strcmp(funcName, "vkFreeDescriptorSets"))
10718 return (PFN_vkVoidFunction)vkFreeDescriptorSets;
10719 if (!strcmp(funcName, "vkUpdateDescriptorSets"))
10720 return (PFN_vkVoidFunction)vkUpdateDescriptorSets;
10721 if (!strcmp(funcName, "vkCreateCommandPool"))
10722 return (PFN_vkVoidFunction)vkCreateCommandPool;
10723 if (!strcmp(funcName, "vkDestroyCommandPool"))
10724 return (PFN_vkVoidFunction)vkDestroyCommandPool;
10725 if (!strcmp(funcName, "vkResetCommandPool"))
10726 return (PFN_vkVoidFunction)vkResetCommandPool;
10727 if (!strcmp(funcName, "vkCreateQueryPool"))
10728 return (PFN_vkVoidFunction)vkCreateQueryPool;
10729 if (!strcmp(funcName, "vkAllocateCommandBuffers"))
10730 return (PFN_vkVoidFunction)vkAllocateCommandBuffers;
10731 if (!strcmp(funcName, "vkFreeCommandBuffers"))
10732 return (PFN_vkVoidFunction)vkFreeCommandBuffers;
10733 if (!strcmp(funcName, "vkBeginCommandBuffer"))
10734 return (PFN_vkVoidFunction)vkBeginCommandBuffer;
10735 if (!strcmp(funcName, "vkEndCommandBuffer"))
10736 return (PFN_vkVoidFunction)vkEndCommandBuffer;
10737 if (!strcmp(funcName, "vkResetCommandBuffer"))
10738 return (PFN_vkVoidFunction)vkResetCommandBuffer;
10739 if (!strcmp(funcName, "vkCmdBindPipeline"))
10740 return (PFN_vkVoidFunction)vkCmdBindPipeline;
10741 if (!strcmp(funcName, "vkCmdSetViewport"))
10742 return (PFN_vkVoidFunction)vkCmdSetViewport;
10743 if (!strcmp(funcName, "vkCmdSetScissor"))
10744 return (PFN_vkVoidFunction)vkCmdSetScissor;
10745 if (!strcmp(funcName, "vkCmdSetLineWidth"))
10746 return (PFN_vkVoidFunction)vkCmdSetLineWidth;
10747 if (!strcmp(funcName, "vkCmdSetDepthBias"))
10748 return (PFN_vkVoidFunction)vkCmdSetDepthBias;
10749 if (!strcmp(funcName, "vkCmdSetBlendConstants"))
10750 return (PFN_vkVoidFunction)vkCmdSetBlendConstants;
10751 if (!strcmp(funcName, "vkCmdSetDepthBounds"))
10752 return (PFN_vkVoidFunction)vkCmdSetDepthBounds;
10753 if (!strcmp(funcName, "vkCmdSetStencilCompareMask"))
10754 return (PFN_vkVoidFunction)vkCmdSetStencilCompareMask;
    if (!strcmp(funcName, "vkCmdSetStencilWriteMask"))
        return (PFN_vkVoidFunction)vkCmdSetStencilWriteMask;
    if (!strcmp(funcName, "vkCmdSetStencilReference"))
        return (PFN_vkVoidFunction)vkCmdSetStencilReference;
    if (!strcmp(funcName, "vkCmdBindDescriptorSets"))
        return (PFN_vkVoidFunction)vkCmdBindDescriptorSets;
    if (!strcmp(funcName, "vkCmdBindVertexBuffers"))
        return (PFN_vkVoidFunction)vkCmdBindVertexBuffers;
    if (!strcmp(funcName, "vkCmdBindIndexBuffer"))
        return (PFN_vkVoidFunction)vkCmdBindIndexBuffer;
    if (!strcmp(funcName, "vkCmdDraw"))
        return (PFN_vkVoidFunction)vkCmdDraw;
    if (!strcmp(funcName, "vkCmdDrawIndexed"))
        return (PFN_vkVoidFunction)vkCmdDrawIndexed;
    if (!strcmp(funcName, "vkCmdDrawIndirect"))
        return (PFN_vkVoidFunction)vkCmdDrawIndirect;
    if (!strcmp(funcName, "vkCmdDrawIndexedIndirect"))
        return (PFN_vkVoidFunction)vkCmdDrawIndexedIndirect;
    if (!strcmp(funcName, "vkCmdDispatch"))
        return (PFN_vkVoidFunction)vkCmdDispatch;
    if (!strcmp(funcName, "vkCmdDispatchIndirect"))
        return (PFN_vkVoidFunction)vkCmdDispatchIndirect;
    if (!strcmp(funcName, "vkCmdCopyBuffer"))
        return (PFN_vkVoidFunction)vkCmdCopyBuffer;
    if (!strcmp(funcName, "vkCmdCopyImage"))
        return (PFN_vkVoidFunction)vkCmdCopyImage;
    if (!strcmp(funcName, "vkCmdBlitImage"))
        return (PFN_vkVoidFunction)vkCmdBlitImage;
    if (!strcmp(funcName, "vkCmdCopyBufferToImage"))
        return (PFN_vkVoidFunction)vkCmdCopyBufferToImage;
    if (!strcmp(funcName, "vkCmdCopyImageToBuffer"))
        return (PFN_vkVoidFunction)vkCmdCopyImageToBuffer;
    if (!strcmp(funcName, "vkCmdUpdateBuffer"))
        return (PFN_vkVoidFunction)vkCmdUpdateBuffer;
    if (!strcmp(funcName, "vkCmdFillBuffer"))
        return (PFN_vkVoidFunction)vkCmdFillBuffer;
    if (!strcmp(funcName, "vkCmdClearColorImage"))
        return (PFN_vkVoidFunction)vkCmdClearColorImage;
    if (!strcmp(funcName, "vkCmdClearDepthStencilImage"))
        return (PFN_vkVoidFunction)vkCmdClearDepthStencilImage;
    if (!strcmp(funcName, "vkCmdClearAttachments"))
        return (PFN_vkVoidFunction)vkCmdClearAttachments;
    if (!strcmp(funcName, "vkCmdResolveImage"))
        return (PFN_vkVoidFunction)vkCmdResolveImage;
    if (!strcmp(funcName, "vkCmdSetEvent"))
        return (PFN_vkVoidFunction)vkCmdSetEvent;
    if (!strcmp(funcName, "vkCmdResetEvent"))
        return (PFN_vkVoidFunction)vkCmdResetEvent;
    if (!strcmp(funcName, "vkCmdWaitEvents"))
        return (PFN_vkVoidFunction)vkCmdWaitEvents;
    if (!strcmp(funcName, "vkCmdPipelineBarrier"))
        return (PFN_vkVoidFunction)vkCmdPipelineBarrier;
    if (!strcmp(funcName, "vkCmdBeginQuery"))
        return (PFN_vkVoidFunction)vkCmdBeginQuery;
    if (!strcmp(funcName, "vkCmdEndQuery"))
        return (PFN_vkVoidFunction)vkCmdEndQuery;
    if (!strcmp(funcName, "vkCmdResetQueryPool"))
        return (PFN_vkVoidFunction)vkCmdResetQueryPool;
    if (!strcmp(funcName, "vkCmdCopyQueryPoolResults"))
        return (PFN_vkVoidFunction)vkCmdCopyQueryPoolResults;
    if (!strcmp(funcName, "vkCmdPushConstants"))
        return (PFN_vkVoidFunction)vkCmdPushConstants;
    if (!strcmp(funcName, "vkCmdWriteTimestamp"))
        return (PFN_vkVoidFunction)vkCmdWriteTimestamp;
    if (!strcmp(funcName, "vkCreateFramebuffer"))
        return (PFN_vkVoidFunction)vkCreateFramebuffer;
    if (!strcmp(funcName, "vkCreateShaderModule"))
        return (PFN_vkVoidFunction)vkCreateShaderModule;
    if (!strcmp(funcName, "vkCreateRenderPass"))
        return (PFN_vkVoidFunction)vkCreateRenderPass;
    if (!strcmp(funcName, "vkCmdBeginRenderPass"))
        return (PFN_vkVoidFunction)vkCmdBeginRenderPass;
    if (!strcmp(funcName, "vkCmdNextSubpass"))
        return (PFN_vkVoidFunction)vkCmdNextSubpass;
    if (!strcmp(funcName, "vkCmdEndRenderPass"))
        return (PFN_vkVoidFunction)vkCmdEndRenderPass;
    if (!strcmp(funcName, "vkCmdExecuteCommands"))
        return (PFN_vkVoidFunction)vkCmdExecuteCommands;
    if (!strcmp(funcName, "vkSetEvent"))
        return (PFN_vkVoidFunction)vkSetEvent;
    if (!strcmp(funcName, "vkMapMemory"))
        return (PFN_vkVoidFunction)vkMapMemory;
#if MTMERGESOURCE
    if (!strcmp(funcName, "vkUnmapMemory"))
        return (PFN_vkVoidFunction)vkUnmapMemory;
    if (!strcmp(funcName, "vkAllocateMemory"))
        return (PFN_vkVoidFunction)vkAllocateMemory;
    if (!strcmp(funcName, "vkFreeMemory"))
        return (PFN_vkVoidFunction)vkFreeMemory;
    if (!strcmp(funcName, "vkFlushMappedMemoryRanges"))
        return (PFN_vkVoidFunction)vkFlushMappedMemoryRanges;
    if (!strcmp(funcName, "vkInvalidateMappedMemoryRanges"))
        return (PFN_vkVoidFunction)vkInvalidateMappedMemoryRanges;
    if (!strcmp(funcName, "vkBindBufferMemory"))
        return (PFN_vkVoidFunction)vkBindBufferMemory;
    if (!strcmp(funcName, "vkGetBufferMemoryRequirements"))
        return (PFN_vkVoidFunction)vkGetBufferMemoryRequirements;
    if (!strcmp(funcName, "vkGetImageMemoryRequirements"))
        return (PFN_vkVoidFunction)vkGetImageMemoryRequirements;
#endif
    if (!strcmp(funcName, "vkGetQueryPoolResults"))
        return (PFN_vkVoidFunction)vkGetQueryPoolResults;
    if (!strcmp(funcName, "vkBindImageMemory"))
        return (PFN_vkVoidFunction)vkBindImageMemory;
    if (!strcmp(funcName, "vkQueueBindSparse"))
        return (PFN_vkVoidFunction)vkQueueBindSparse;
    if (!strcmp(funcName, "vkCreateSemaphore"))
        return (PFN_vkVoidFunction)vkCreateSemaphore;
    if (!strcmp(funcName, "vkCreateEvent"))
        return (PFN_vkVoidFunction)vkCreateEvent;

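    // No statically intercepted entry point matched. Without a device handle
    // there is no per-device dispatch table to forward the query to.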
    if (dev == NULL)
        return NULL;

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);

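    // WSI (swapchain) entry points are only returned when the corresponding
    // extension was enabled at device-creation time.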
    if (dev_data->device_extensions.wsi_enabled) {
        if (!strcmp(funcName, "vkCreateSwapchainKHR"))
            return (PFN_vkVoidFunction)vkCreateSwapchainKHR;
        if (!strcmp(funcName, "vkDestroySwapchainKHR"))
            return (PFN_vkVoidFunction)vkDestroySwapchainKHR;
        if (!strcmp(funcName, "vkGetSwapchainImagesKHR"))
            return (PFN_vkVoidFunction)vkGetSwapchainImagesKHR;
        if (!strcmp(funcName, "vkAcquireNextImageKHR"))
            return (PFN_vkVoidFunction)vkAcquireNextImageKHR;
        if (!strcmp(funcName, "vkQueuePresentKHR"))
            return (PFN_vkVoidFunction)vkQueuePresentKHR;
    }

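    // Not a function this layer intercepts: pass the query down the chain to
    // the next layer or the ICD.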
    VkLayerDispatchTable *pTable = dev_data->device_dispatch_table;
    if (pTable->GetDeviceProcAddr == NULL)
        return NULL;
    return pTable->GetDeviceProcAddr(dev, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    if (!strcmp(funcName, "vkGetInstanceProcAddr"))
        return (PFN_vkVoidFunction)vkGetInstanceProcAddr;
    if (!strcmp(funcName, "vkGetDeviceProcAddr"))
        return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
    if (!strcmp(funcName, "vkCreateInstance"))
        return (PFN_vkVoidFunction)vkCreateInstance;
    if (!strcmp(funcName, "vkCreateDevice"))
        return (PFN_vkVoidFunction)vkCreateDevice;
    if (!strcmp(funcName, "vkDestroyInstance"))
        return (PFN_vkVoidFunction)vkDestroyInstance;
#if MTMERGESOURCE
    if (!strcmp(funcName, "vkGetPhysicalDeviceMemoryProperties"))
        return (PFN_vkVoidFunction)vkGetPhysicalDeviceMemoryProperties;
#endif
    if (!strcmp(funcName, "vkEnumerateInstanceLayerProperties"))
        return (PFN_vkVoidFunction)vkEnumerateInstanceLayerProperties;
    if (!strcmp(funcName, "vkEnumerateInstanceExtensionProperties"))
        return (PFN_vkVoidFunction)vkEnumerateInstanceExtensionProperties;
    if (!strcmp(funcName, "vkEnumerateDeviceLayerProperties"))
        return (PFN_vkVoidFunction)vkEnumerateDeviceLayerProperties;
    if (!strcmp(funcName, "vkEnumerateDeviceExtensionProperties"))
        return (PFN_vkVoidFunction)vkEnumerateDeviceExtensionProperties;

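    // Nothing matched above. Without an instance handle the query cannot be
    // forwarded any further down the chain.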
    if (instance == NULL)
        return NULL;

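    // Let the debug-report helper resolve its own entry points
    // (VK_EXT_debug_report) before falling through to the next layer.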
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    PFN_vkVoidFunction fptr = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
    if (fptr)
        return fptr;

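    // Unknown function: hand the query down to the next layer in the chain
    // (or the loader's terminator).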
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    if (pTable->GetInstanceProcAddr == NULL)
        return NULL;
    return pTable->GetInstanceProcAddr(instance, funcName);
}
