/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and/or associated documentation files (the "Materials"), to
 * deal in the Materials without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Materials, and to permit persons to whom the Materials
 * are furnished to do so, subject to the following conditions:
 *
 * The above copyright notice(s) and this permission notice shall be included
 * in all copies or substantial portions of the Materials.
 *
 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 *
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE
 * USE OR OTHER DEALINGS IN THE MATERIALS
 *
 * Author: Tobin Ehlis <tobine@google.com>
 */
27
28 #include <stdio.h>
29 #include <stdlib.h>
30 #include <string.h>
31 #include <inttypes.h>
32
33 #include "vulkan/vulkan.h"
34 #include "vk_loader_platform.h"
35
36 #include <vector>
37 #include <unordered_map>
38
39 #include "vulkan/vk_layer.h"
40 #include "vk_layer_config.h"
41 #include "vk_layer_table.h"
42 #include "vk_layer_data.h"
43 #include "vk_layer_logging.h"
44 #include "vk_layer_extension_utils.h"
45 #include "vk_safe_struct.h"
46 #include "vk_layer_utils.h"
47
// Per-device state tracked by this layer
struct layer_data {
    bool wsi_enabled; // true when VK_KHR_swapchain was enabled at device creation

    layer_data() : wsi_enabled(false){};
};
53
// Records which WSI-related instance extensions were enabled at
// vkCreateInstance time. Members are default-initialized to false so an
// instance of this struct is always in a defined state.
struct instExts {
    bool wsi_enabled{false};     // VK_KHR_surface
    bool xlib_enabled{false};    // VK_KHR_xlib_surface
    bool xcb_enabled{false};     // VK_KHR_xcb_surface
    bool wayland_enabled{false}; // VK_KHR_wayland_surface
    bool mir_enabled{false};     // VK_KHR_mir_surface
    bool android_enabled{false}; // VK_KHR_android_surface
    bool win32_enabled{false};   // VK_KHR_win32_surface
};
63
64 static std::unordered_map<void *, struct instExts> instanceExtMap;
65 static std::unordered_map<void *, layer_data *> layer_data_map;
66 static device_table_map unique_objects_device_table_map;
67 static instance_table_map unique_objects_instance_table_map;
68 // Structure to wrap returned non-dispatchable objects to guarantee they have unique handles
69 // address of struct will be used as the unique handle
70 struct VkUniqueObject {
71 uint64_t actualObject;
72 };
73
74 // Handle CreateInstance
createInstanceRegisterExtensions(const VkInstanceCreateInfo * pCreateInfo,VkInstance instance)75 static void createInstanceRegisterExtensions(const VkInstanceCreateInfo *pCreateInfo, VkInstance instance) {
76 uint32_t i;
77 VkLayerInstanceDispatchTable *pDisp = get_dispatch_table(unique_objects_instance_table_map, instance);
78 PFN_vkGetInstanceProcAddr gpa = pDisp->GetInstanceProcAddr;
79
80 pDisp->DestroySurfaceKHR = (PFN_vkDestroySurfaceKHR)gpa(instance, "vkDestroySurfaceKHR");
81 pDisp->GetPhysicalDeviceSurfaceSupportKHR =
82 (PFN_vkGetPhysicalDeviceSurfaceSupportKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceSupportKHR");
83 pDisp->GetPhysicalDeviceSurfaceCapabilitiesKHR =
84 (PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR");
85 pDisp->GetPhysicalDeviceSurfaceFormatsKHR =
86 (PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceFormatsKHR");
87 pDisp->GetPhysicalDeviceSurfacePresentModesKHR =
88 (PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)gpa(instance, "vkGetPhysicalDeviceSurfacePresentModesKHR");
89 #ifdef VK_USE_PLATFORM_WIN32_KHR
90 pDisp->CreateWin32SurfaceKHR = (PFN_vkCreateWin32SurfaceKHR)gpa(instance, "vkCreateWin32SurfaceKHR");
91 pDisp->GetPhysicalDeviceWin32PresentationSupportKHR =
92 (PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceWin32PresentationSupportKHR");
93 #endif // VK_USE_PLATFORM_WIN32_KHR
94 #ifdef VK_USE_PLATFORM_XCB_KHR
95 pDisp->CreateXcbSurfaceKHR = (PFN_vkCreateXcbSurfaceKHR)gpa(instance, "vkCreateXcbSurfaceKHR");
96 pDisp->GetPhysicalDeviceXcbPresentationSupportKHR =
97 (PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceXcbPresentationSupportKHR");
98 #endif // VK_USE_PLATFORM_XCB_KHR
99 #ifdef VK_USE_PLATFORM_XLIB_KHR
100 pDisp->CreateXlibSurfaceKHR = (PFN_vkCreateXlibSurfaceKHR)gpa(instance, "vkCreateXlibSurfaceKHR");
101 pDisp->GetPhysicalDeviceXlibPresentationSupportKHR =
102 (PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceXlibPresentationSupportKHR");
103 #endif // VK_USE_PLATFORM_XLIB_KHR
104 #ifdef VK_USE_PLATFORM_MIR_KHR
105 pDisp->CreateMirSurfaceKHR = (PFN_vkCreateMirSurfaceKHR)gpa(instance, "vkCreateMirSurfaceKHR");
106 pDisp->GetPhysicalDeviceMirPresentationSupportKHR =
107 (PFN_vkGetPhysicalDeviceMirPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceMirPresentationSupportKHR");
108 #endif // VK_USE_PLATFORM_MIR_KHR
109 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
110 pDisp->CreateWaylandSurfaceKHR = (PFN_vkCreateWaylandSurfaceKHR)gpa(instance, "vkCreateWaylandSurfaceKHR");
111 pDisp->GetPhysicalDeviceWaylandPresentationSupportKHR =
112 (PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceWaylandPresentationSupportKHR");
113 #endif // VK_USE_PLATFORM_WAYLAND_KHR
114 #ifdef VK_USE_PLATFORM_ANDROID_KHR
115 pDisp->CreateAndroidSurfaceKHR = (PFN_vkCreateAndroidSurfaceKHR)gpa(instance, "vkCreateAndroidSurfaceKHR");
116 #endif // VK_USE_PLATFORM_ANDROID_KHR
117
118 instanceExtMap[pDisp] = {};
119 for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
120 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SURFACE_EXTENSION_NAME) == 0)
121 instanceExtMap[pDisp].wsi_enabled = true;
122 #ifdef VK_USE_PLATFORM_XLIB_KHR
123 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XLIB_SURFACE_EXTENSION_NAME) == 0)
124 instanceExtMap[pDisp].xlib_enabled = true;
125 #endif
126 #ifdef VK_USE_PLATFORM_XCB_KHR
127 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XCB_SURFACE_EXTENSION_NAME) == 0)
128 instanceExtMap[pDisp].xcb_enabled = true;
129 #endif
130 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
131 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME) == 0)
132 instanceExtMap[pDisp].wayland_enabled = true;
133 #endif
134 #ifdef VK_USE_PLATFORM_MIR_KHR
135 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_MIR_SURFACE_EXTENSION_NAME) == 0)
136 instanceExtMap[pDisp].mir_enabled = true;
137 #endif
138 #ifdef VK_USE_PLATFORM_ANDROID_KHR
139 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_ANDROID_SURFACE_EXTENSION_NAME) == 0)
140 instanceExtMap[pDisp].android_enabled = true;
141 #endif
142 #ifdef VK_USE_PLATFORM_WIN32_KHR
143 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WIN32_SURFACE_EXTENSION_NAME) == 0)
144 instanceExtMap[pDisp].win32_enabled = true;
145 #endif
146 }
147 }
148
explicit_CreateInstance(const VkInstanceCreateInfo * pCreateInfo,const VkAllocationCallbacks * pAllocator,VkInstance * pInstance)149 VkResult explicit_CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
150 VkInstance *pInstance) {
151 VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
152
153 assert(chain_info->u.pLayerInfo);
154 PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
155 PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
156 if (fpCreateInstance == NULL) {
157 return VK_ERROR_INITIALIZATION_FAILED;
158 }
159
160 // Advance the link info for the next element on the chain
161 chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
162
163 VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
164 if (result != VK_SUCCESS) {
165 return result;
166 }
167
168 initInstanceTable(*pInstance, fpGetInstanceProcAddr, unique_objects_instance_table_map);
169
170 createInstanceRegisterExtensions(pCreateInfo, *pInstance);
171
172 return result;
173 }
174
175 // Handle CreateDevice
createDeviceRegisterExtensions(const VkDeviceCreateInfo * pCreateInfo,VkDevice device)176 static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
177 layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
178 VkLayerDispatchTable *pDisp = get_dispatch_table(unique_objects_device_table_map, device);
179 PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
180 pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
181 pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
182 pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
183 pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
184 pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
185 my_device_data->wsi_enabled = false;
186 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
187 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
188 my_device_data->wsi_enabled = true;
189 }
190 }
191
explicit_CreateDevice(VkPhysicalDevice gpu,const VkDeviceCreateInfo * pCreateInfo,const VkAllocationCallbacks * pAllocator,VkDevice * pDevice)192 VkResult explicit_CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
193 VkDevice *pDevice) {
194 VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
195
196 assert(chain_info->u.pLayerInfo);
197 PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
198 PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
199 PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
200 if (fpCreateDevice == NULL) {
201 return VK_ERROR_INITIALIZATION_FAILED;
202 }
203
204 // Advance the link info for the next element on the chain
205 chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
206
207 VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
208 if (result != VK_SUCCESS) {
209 return result;
210 }
211
212 // Setup layer's device dispatch table
213 initDeviceTable(*pDevice, fpGetDeviceProcAddr, unique_objects_device_table_map);
214
215 createDeviceRegisterExtensions(pCreateInfo, *pDevice);
216
217 return result;
218 }
219
explicit_QueueSubmit(VkQueue queue,uint32_t submitCount,const VkSubmitInfo * pSubmits,VkFence fence)220 VkResult explicit_QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
221 // UNWRAP USES:
222 // 0 : fence,VkFence
223 if (VK_NULL_HANDLE != fence) {
224 fence = (VkFence)((VkUniqueObject *)fence)->actualObject;
225 }
226 // waitSemaphoreCount : pSubmits[submitCount]->pWaitSemaphores,VkSemaphore
227 std::vector<VkSemaphore> original_pWaitSemaphores = {};
228 // signalSemaphoreCount : pSubmits[submitCount]->pSignalSemaphores,VkSemaphore
229 std::vector<VkSemaphore> original_pSignalSemaphores = {};
230 if (pSubmits) {
231 for (uint32_t index0 = 0; index0 < submitCount; ++index0) {
232 if (pSubmits[index0].pWaitSemaphores) {
233 for (uint32_t index1 = 0; index1 < pSubmits[index0].waitSemaphoreCount; ++index1) {
234 VkSemaphore **ppSemaphore = (VkSemaphore **)&(pSubmits[index0].pWaitSemaphores);
235 original_pWaitSemaphores.push_back(pSubmits[index0].pWaitSemaphores[index1]);
236 *(ppSemaphore[index1]) =
237 (VkSemaphore)((VkUniqueObject *)pSubmits[index0].pWaitSemaphores[index1])->actualObject;
238 }
239 }
240 if (pSubmits[index0].pSignalSemaphores) {
241 for (uint32_t index1 = 0; index1 < pSubmits[index0].signalSemaphoreCount; ++index1) {
242 VkSemaphore **ppSemaphore = (VkSemaphore **)&(pSubmits[index0].pSignalSemaphores);
243 original_pSignalSemaphores.push_back(pSubmits[index0].pSignalSemaphores[index1]);
244 *(ppSemaphore[index1]) =
245 (VkSemaphore)((VkUniqueObject *)pSubmits[index0].pSignalSemaphores[index1])->actualObject;
246 }
247 }
248 }
249 }
250 VkResult result = get_dispatch_table(unique_objects_device_table_map, queue)->QueueSubmit(queue, submitCount, pSubmits, fence);
251 if (pSubmits) {
252 for (uint32_t index0 = 0; index0 < submitCount; ++index0) {
253 if (pSubmits[index0].pWaitSemaphores) {
254 for (uint32_t index1 = 0; index1 < pSubmits[index0].waitSemaphoreCount; ++index1) {
255 VkSemaphore **ppSemaphore = (VkSemaphore **)&(pSubmits[index0].pWaitSemaphores);
256 *(ppSemaphore[index1]) = original_pWaitSemaphores[index1];
257 }
258 }
259 if (pSubmits[index0].pSignalSemaphores) {
260 for (uint32_t index1 = 0; index1 < pSubmits[index0].signalSemaphoreCount; ++index1) {
261 VkSemaphore **ppSemaphore = (VkSemaphore **)&(pSubmits[index0].pSignalSemaphores);
262 *(ppSemaphore[index1]) = original_pSignalSemaphores[index1];
263 }
264 }
265 }
266 }
267 return result;
268 }
269
explicit_QueueBindSparse(VkQueue queue,uint32_t bindInfoCount,const VkBindSparseInfo * pBindInfo,VkFence fence)270 VkResult explicit_QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
271 // UNWRAP USES:
272 // 0 : pBindInfo[bindInfoCount]->pBufferBinds[bufferBindCount]->buffer,VkBuffer,
273 // pBindInfo[bindInfoCount]->pBufferBinds[bufferBindCount]->pBinds[bindCount]->memory,VkDeviceMemory,
274 // pBindInfo[bindInfoCount]->pImageOpaqueBinds[imageOpaqueBindCount]->image,VkImage,
275 // pBindInfo[bindInfoCount]->pImageOpaqueBinds[imageOpaqueBindCount]->pBinds[bindCount]->memory,VkDeviceMemory,
276 // pBindInfo[bindInfoCount]->pImageBinds[imageBindCount]->image,VkImage,
277 // pBindInfo[bindInfoCount]->pImageBinds[imageBindCount]->pBinds[bindCount]->memory,VkDeviceMemory
278 std::vector<VkBuffer> original_buffer = {};
279 std::vector<VkDeviceMemory> original_memory1 = {};
280 std::vector<VkImage> original_image1 = {};
281 std::vector<VkDeviceMemory> original_memory2 = {};
282 std::vector<VkImage> original_image2 = {};
283 std::vector<VkDeviceMemory> original_memory3 = {};
284 std::vector<VkSemaphore> original_pWaitSemaphores = {};
285 std::vector<VkSemaphore> original_pSignalSemaphores = {};
286 if (pBindInfo) {
287 for (uint32_t index0 = 0; index0 < bindInfoCount; ++index0) {
288 if (pBindInfo[index0].pBufferBinds) {
289 for (uint32_t index1 = 0; index1 < pBindInfo[index0].bufferBindCount; ++index1) {
290 if (pBindInfo[index0].pBufferBinds[index1].buffer) {
291 VkBuffer *pBuffer = (VkBuffer *)&(pBindInfo[index0].pBufferBinds[index1].buffer);
292 original_buffer.push_back(pBindInfo[index0].pBufferBinds[index1].buffer);
293 *(pBuffer) = (VkBuffer)((VkUniqueObject *)pBindInfo[index0].pBufferBinds[index1].buffer)->actualObject;
294 }
295 if (pBindInfo[index0].pBufferBinds[index1].pBinds) {
296 for (uint32_t index2 = 0; index2 < pBindInfo[index0].pBufferBinds[index1].bindCount; ++index2) {
297 if (pBindInfo[index0].pBufferBinds[index1].pBinds[index2].memory) {
298 VkDeviceMemory *pDeviceMemory =
299 (VkDeviceMemory *)&(pBindInfo[index0].pBufferBinds[index1].pBinds[index2].memory);
300 original_memory1.push_back(pBindInfo[index0].pBufferBinds[index1].pBinds[index2].memory);
301 *(pDeviceMemory) =
302 (VkDeviceMemory)((VkUniqueObject *)pBindInfo[index0].pBufferBinds[index1].pBinds[index2].memory)
303 ->actualObject;
304 }
305 }
306 }
307 }
308 }
309 if (pBindInfo[index0].pImageOpaqueBinds) {
310 for (uint32_t index1 = 0; index1 < pBindInfo[index0].imageOpaqueBindCount; ++index1) {
311 if (pBindInfo[index0].pImageOpaqueBinds[index1].image) {
312 VkImage *pImage = (VkImage *)&(pBindInfo[index0].pImageOpaqueBinds[index1].image);
313 original_image1.push_back(pBindInfo[index0].pImageOpaqueBinds[index1].image);
314 *(pImage) = (VkImage)((VkUniqueObject *)pBindInfo[index0].pImageOpaqueBinds[index1].image)->actualObject;
315 }
316 if (pBindInfo[index0].pImageOpaqueBinds[index1].pBinds) {
317 for (uint32_t index2 = 0; index2 < pBindInfo[index0].pImageOpaqueBinds[index1].bindCount; ++index2) {
318 if (pBindInfo[index0].pImageOpaqueBinds[index1].pBinds[index2].memory) {
319 VkDeviceMemory *pDeviceMemory =
320 (VkDeviceMemory *)&(pBindInfo[index0].pImageOpaqueBinds[index1].pBinds[index2].memory);
321 original_memory2.push_back(pBindInfo[index0].pImageOpaqueBinds[index1].pBinds[index2].memory);
322 *(pDeviceMemory) =
323 (VkDeviceMemory)(
324 (VkUniqueObject *)pBindInfo[index0].pImageOpaqueBinds[index1].pBinds[index2].memory)
325 ->actualObject;
326 }
327 }
328 }
329 }
330 }
331 if (pBindInfo[index0].pImageBinds) {
332 for (uint32_t index1 = 0; index1 < pBindInfo[index0].imageBindCount; ++index1) {
333 if (pBindInfo[index0].pImageBinds[index1].image) {
334 VkImage *pImage = (VkImage *)&(pBindInfo[index0].pImageBinds[index1].image);
335 original_image2.push_back(pBindInfo[index0].pImageBinds[index1].image);
336 *(pImage) = (VkImage)((VkUniqueObject *)pBindInfo[index0].pImageBinds[index1].image)->actualObject;
337 }
338 if (pBindInfo[index0].pImageBinds[index1].pBinds) {
339 for (uint32_t index2 = 0; index2 < pBindInfo[index0].pImageBinds[index1].bindCount; ++index2) {
340 if (pBindInfo[index0].pImageBinds[index1].pBinds[index2].memory) {
341 VkDeviceMemory *pDeviceMemory =
342 (VkDeviceMemory *)&(pBindInfo[index0].pImageBinds[index1].pBinds[index2].memory);
343 original_memory3.push_back(pBindInfo[index0].pImageBinds[index1].pBinds[index2].memory);
344 *(pDeviceMemory) =
345 (VkDeviceMemory)((VkUniqueObject *)pBindInfo[index0].pImageBinds[index1].pBinds[index2].memory)
346 ->actualObject;
347 }
348 }
349 }
350 }
351 }
352 if (pBindInfo[index0].pWaitSemaphores) {
353 for (uint32_t index1 = 0; index1 < pBindInfo[index0].waitSemaphoreCount; ++index1) {
354 VkSemaphore **ppSemaphore = (VkSemaphore **)&(pBindInfo[index0].pWaitSemaphores);
355 original_pWaitSemaphores.push_back(pBindInfo[index0].pWaitSemaphores[index1]);
356 *(ppSemaphore[index1]) =
357 (VkSemaphore)((VkUniqueObject *)pBindInfo[index0].pWaitSemaphores[index1])->actualObject;
358 }
359 }
360 if (pBindInfo[index0].pSignalSemaphores) {
361 for (uint32_t index1 = 0; index1 < pBindInfo[index0].signalSemaphoreCount; ++index1) {
362 VkSemaphore **ppSemaphore = (VkSemaphore **)&(pBindInfo[index0].pSignalSemaphores);
363 original_pSignalSemaphores.push_back(pBindInfo[index0].pSignalSemaphores[index1]);
364 *(ppSemaphore[index1]) =
365 (VkSemaphore)((VkUniqueObject *)pBindInfo[index0].pSignalSemaphores[index1])->actualObject;
366 }
367 }
368 }
369 }
370 if (VK_NULL_HANDLE != fence) {
371 fence = (VkFence)((VkUniqueObject *)fence)->actualObject;
372 }
373 VkResult result =
374 get_dispatch_table(unique_objects_device_table_map, queue)->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
375 if (pBindInfo) {
376 for (uint32_t index0 = 0; index0 < bindInfoCount; ++index0) {
377 if (pBindInfo[index0].pBufferBinds) {
378 for (uint32_t index1 = 0; index1 < pBindInfo[index0].bufferBindCount; ++index1) {
379 if (pBindInfo[index0].pBufferBinds[index1].buffer) {
380 VkBuffer *pBuffer = (VkBuffer *)&(pBindInfo[index0].pBufferBinds[index1].buffer);
381 *(pBuffer) = original_buffer[index1];
382 }
383 if (pBindInfo[index0].pBufferBinds[index1].pBinds) {
384 for (uint32_t index2 = 0; index2 < pBindInfo[index0].pBufferBinds[index1].bindCount; ++index2) {
385 if (pBindInfo[index0].pBufferBinds[index1].pBinds[index2].memory) {
386 VkDeviceMemory *pDeviceMemory =
387 (VkDeviceMemory *)&(pBindInfo[index0].pBufferBinds[index1].pBinds[index2].memory);
388 *(pDeviceMemory) = original_memory1[index2];
389 }
390 }
391 }
392 }
393 }
394 if (pBindInfo[index0].pImageOpaqueBinds) {
395 for (uint32_t index1 = 0; index1 < pBindInfo[index0].imageOpaqueBindCount; ++index1) {
396 if (pBindInfo[index0].pImageOpaqueBinds[index1].image) {
397 VkImage *pImage = (VkImage *)&(pBindInfo[index0].pImageOpaqueBinds[index1].image);
398 *(pImage) = original_image1[index1];
399 }
400 if (pBindInfo[index0].pImageOpaqueBinds[index1].pBinds) {
401 for (uint32_t index2 = 0; index2 < pBindInfo[index0].pImageOpaqueBinds[index1].bindCount; ++index2) {
402 if (pBindInfo[index0].pImageOpaqueBinds[index1].pBinds[index2].memory) {
403 VkDeviceMemory *pDeviceMemory =
404 (VkDeviceMemory *)&(pBindInfo[index0].pImageOpaqueBinds[index1].pBinds[index2].memory);
405 *(pDeviceMemory) = original_memory2[index2];
406 }
407 }
408 }
409 }
410 }
411 if (pBindInfo[index0].pImageBinds) {
412 for (uint32_t index1 = 0; index1 < pBindInfo[index0].imageBindCount; ++index1) {
413 if (pBindInfo[index0].pImageBinds[index1].image) {
414 VkImage *pImage = (VkImage *)&(pBindInfo[index0].pImageBinds[index1].image);
415 *(pImage) = original_image2[index1];
416 }
417 if (pBindInfo[index0].pImageBinds[index1].pBinds) {
418 for (uint32_t index2 = 0; index2 < pBindInfo[index0].pImageBinds[index1].bindCount; ++index2) {
419 if (pBindInfo[index0].pImageBinds[index1].pBinds[index2].memory) {
420 VkDeviceMemory *pDeviceMemory =
421 (VkDeviceMemory *)&(pBindInfo[index0].pImageBinds[index1].pBinds[index2].memory);
422 *(pDeviceMemory) = original_memory3[index2];
423 }
424 }
425 }
426 }
427 }
428 if (pBindInfo[index0].pWaitSemaphores) {
429 for (uint32_t index1 = 0; index1 < pBindInfo[index0].waitSemaphoreCount; ++index1) {
430 VkSemaphore **ppSemaphore = (VkSemaphore **)&(pBindInfo[index0].pWaitSemaphores);
431 *(ppSemaphore[index1]) = original_pWaitSemaphores[index1];
432 }
433 }
434 if (pBindInfo[index0].pSignalSemaphores) {
435 for (uint32_t index1 = 0; index1 < pBindInfo[index0].signalSemaphoreCount; ++index1) {
436 VkSemaphore **ppSemaphore = (VkSemaphore **)&(pBindInfo[index0].pSignalSemaphores);
437 *(ppSemaphore[index1]) = original_pSignalSemaphores[index1];
438 }
439 }
440 }
441 }
442 return result;
443 }
444
explicit_CreateComputePipelines(VkDevice device,VkPipelineCache pipelineCache,uint32_t createInfoCount,const VkComputePipelineCreateInfo * pCreateInfos,const VkAllocationCallbacks * pAllocator,VkPipeline * pPipelines)445 VkResult explicit_CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
446 const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
447 VkPipeline *pPipelines) {
448 // STRUCT USES:{'pipelineCache': 'VkPipelineCache', 'pCreateInfos[createInfoCount]': {'stage': {'module': 'VkShaderModule'},
449 // 'layout': 'VkPipelineLayout', 'basePipelineHandle': 'VkPipeline'}}
450 // LOCAL DECLS:{'pCreateInfos': 'VkComputePipelineCreateInfo*'}
451 safe_VkComputePipelineCreateInfo *local_pCreateInfos = NULL;
452 if (pCreateInfos) {
453 local_pCreateInfos = new safe_VkComputePipelineCreateInfo[createInfoCount];
454 for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
455 local_pCreateInfos[idx0].initialize(&pCreateInfos[idx0]);
456 if (pCreateInfos[idx0].basePipelineHandle) {
457 local_pCreateInfos[idx0].basePipelineHandle =
458 (VkPipeline)((VkUniqueObject *)pCreateInfos[idx0].basePipelineHandle)->actualObject;
459 }
460 if (pCreateInfos[idx0].layout) {
461 local_pCreateInfos[idx0].layout = (VkPipelineLayout)((VkUniqueObject *)pCreateInfos[idx0].layout)->actualObject;
462 }
463 if (pCreateInfos[idx0].stage.module) {
464 local_pCreateInfos[idx0].stage.module =
465 (VkShaderModule)((VkUniqueObject *)pCreateInfos[idx0].stage.module)->actualObject;
466 }
467 }
468 }
469 if (pipelineCache) {
470 pipelineCache = (VkPipelineCache)((VkUniqueObject *)pipelineCache)->actualObject;
471 }
472 // CODEGEN : file /usr/local/google/home/tobine/vulkan_work/LoaderAndTools/vk-layer-generate.py line #1671
473 VkResult result = get_dispatch_table(unique_objects_device_table_map, device)
474 ->CreateComputePipelines(device, pipelineCache, createInfoCount,
475 (const VkComputePipelineCreateInfo *)local_pCreateInfos, pAllocator, pPipelines);
476 delete[] local_pCreateInfos;
477 if (VK_SUCCESS == result) {
478 VkUniqueObject *pUO = NULL;
479 for (uint32_t i = 0; i < createInfoCount; ++i) {
480 pUO = new VkUniqueObject();
481 pUO->actualObject = (uint64_t)pPipelines[i];
482 pPipelines[i] = (VkPipeline)pUO;
483 }
484 }
485 return result;
486 }
487
explicit_CreateGraphicsPipelines(VkDevice device,VkPipelineCache pipelineCache,uint32_t createInfoCount,const VkGraphicsPipelineCreateInfo * pCreateInfos,const VkAllocationCallbacks * pAllocator,VkPipeline * pPipelines)488 VkResult explicit_CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
489 const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
490 VkPipeline *pPipelines) {
491 // STRUCT USES:{'pipelineCache': 'VkPipelineCache', 'pCreateInfos[createInfoCount]': {'layout': 'VkPipelineLayout',
492 // 'pStages[stageCount]': {'module': 'VkShaderModule'}, 'renderPass': 'VkRenderPass', 'basePipelineHandle': 'VkPipeline'}}
493 // LOCAL DECLS:{'pCreateInfos': 'VkGraphicsPipelineCreateInfo*'}
494 safe_VkGraphicsPipelineCreateInfo *local_pCreateInfos = NULL;
495 if (pCreateInfos) {
496 local_pCreateInfos = new safe_VkGraphicsPipelineCreateInfo[createInfoCount];
497 for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
498 local_pCreateInfos[idx0].initialize(&pCreateInfos[idx0]);
499 if (pCreateInfos[idx0].basePipelineHandle) {
500 local_pCreateInfos[idx0].basePipelineHandle =
501 (VkPipeline)((VkUniqueObject *)pCreateInfos[idx0].basePipelineHandle)->actualObject;
502 }
503 if (pCreateInfos[idx0].layout) {
504 local_pCreateInfos[idx0].layout = (VkPipelineLayout)((VkUniqueObject *)pCreateInfos[idx0].layout)->actualObject;
505 }
506 if (pCreateInfos[idx0].pStages) {
507 for (uint32_t idx1 = 0; idx1 < pCreateInfos[idx0].stageCount; ++idx1) {
508 if (pCreateInfos[idx0].pStages[idx1].module) {
509 local_pCreateInfos[idx0].pStages[idx1].module =
510 (VkShaderModule)((VkUniqueObject *)pCreateInfos[idx0].pStages[idx1].module)->actualObject;
511 }
512 }
513 }
514 if (pCreateInfos[idx0].renderPass) {
515 local_pCreateInfos[idx0].renderPass = (VkRenderPass)((VkUniqueObject *)pCreateInfos[idx0].renderPass)->actualObject;
516 }
517 }
518 }
519 if (pipelineCache) {
520 pipelineCache = (VkPipelineCache)((VkUniqueObject *)pipelineCache)->actualObject;
521 }
522 // CODEGEN : file /usr/local/google/home/tobine/vulkan_work/LoaderAndTools/vk-layer-generate.py line #1671
523 VkResult result =
524 get_dispatch_table(unique_objects_device_table_map, device)
525 ->CreateGraphicsPipelines(device, pipelineCache, createInfoCount,
526 (const VkGraphicsPipelineCreateInfo *)local_pCreateInfos, pAllocator, pPipelines);
527 delete[] local_pCreateInfos;
528 if (VK_SUCCESS == result) {
529 VkUniqueObject *pUO = NULL;
530 for (uint32_t i = 0; i < createInfoCount; ++i) {
531 pUO = new VkUniqueObject();
532 pUO->actualObject = (uint64_t)pPipelines[i];
533 pPipelines[i] = (VkPipeline)pUO;
534 }
535 }
536 return result;
537 }
538
explicit_GetSwapchainImagesKHR(VkDevice device,VkSwapchainKHR swapchain,uint32_t * pSwapchainImageCount,VkImage * pSwapchainImages)539 VkResult explicit_GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
540 VkImage *pSwapchainImages) {
541 // UNWRAP USES:
542 // 0 : swapchain,VkSwapchainKHR, pSwapchainImages,VkImage
543 if (VK_NULL_HANDLE != swapchain) {
544 swapchain = (VkSwapchainKHR)((VkUniqueObject *)swapchain)->actualObject;
545 }
546 VkResult result = get_dispatch_table(unique_objects_device_table_map, device)
547 ->GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
548 // TODO : Need to add corresponding code to delete these images
549 if (VK_SUCCESS == result) {
550 if ((*pSwapchainImageCount > 0) && pSwapchainImages) {
551 std::vector<VkUniqueObject *> uniqueImages = {};
552 for (uint32_t i = 0; i < *pSwapchainImageCount; ++i) {
553 uniqueImages.push_back(new VkUniqueObject());
554 uniqueImages[i]->actualObject = (uint64_t)pSwapchainImages[i];
555 pSwapchainImages[i] = (VkImage)uniqueImages[i];
556 }
557 }
558 }
559 return result;
560 }
561