1 /*
2 * Copyright © 2019 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <string.h>
25 #include <stdlib.h>
26 #include <assert.h>
27
28 #include <vulkan/vulkan_core.h>
29 #include <vulkan/vk_layer.h>
30
31 #include "git_sha1.h"
32
33 #include "imgui.h"
34
35 #include "overlay_params.h"
36
37 #include "util/u_debug.h"
38 #include "util/hash_table.h"
39 #include "util/list.h"
40 #include "util/ralloc.h"
41 #include "util/os_time.h"
42 #include "util/os_socket.h"
43 #include "util/simple_mtx.h"
44 #include "util/u_math.h"
45
46 #include "vk_enum_to_str.h"
47 #include "vk_dispatch_table.h"
48 #include "vk_util.h"
49
50 /* Mapped from VkInstance/VkPhysicalDevice */
51 struct instance_data {
52 struct vk_instance_dispatch_table vtable;
53 struct vk_physical_device_dispatch_table pd_vtable;
54 VkInstance instance;
55
56 struct overlay_params params;
57 bool pipeline_statistics_enabled;
58
59 bool first_line_printed;
60
61 int control_client;
62
63 /* Dumping of frame stats to a file has been enabled. */
64 bool capture_enabled;
65
66 /* Dumping of frame stats to a file has been enabled and started. */
67 bool capture_started;
68 };
69
70 struct frame_stat {
71 uint64_t stats[OVERLAY_PARAM_ENABLED_MAX];
72 };
73
74 /* Mapped from VkDevice */
75 struct queue_data;
76 struct device_data {
77 struct instance_data *instance;
78
79 PFN_vkSetDeviceLoaderData set_device_loader_data;
80
81 struct vk_device_dispatch_table vtable;
82 VkPhysicalDevice physical_device;
83 VkDevice device;
84
85 VkPhysicalDeviceProperties properties;
86
87 struct queue_data *graphic_queue;
88
89 struct queue_data **queues;
90 uint32_t n_queues;
91
92 bool pipeline_statistics_enabled;
93
94 /* For a single frame */
95 struct frame_stat frame_stats;
96 };
97
98 /* Mapped from VkCommandBuffer */
99 struct command_buffer_data {
100 struct device_data *device;
101
102 VkCommandBufferLevel level;
103
104 VkCommandBuffer cmd_buffer;
105 VkQueryPool pipeline_query_pool;
106 VkQueryPool timestamp_query_pool;
107 uint32_t query_index;
108
109 struct frame_stat stats;
110
111 struct list_head link; /* link into queue_data::running_command_buffer */
112 };
113
114 /* Mapped from VkQueue */
115 struct queue_data {
116 struct device_data *device;
117
118 VkQueue queue;
119 VkQueueFlags flags;
120 uint32_t family_index;
121 uint64_t timestamp_mask;
122
123 VkFence queries_fence;
124
125 struct list_head running_command_buffer;
126 };
127
128 struct overlay_draw {
129 struct list_head link;
130
131 VkCommandBuffer command_buffer;
132
133 VkSemaphore cross_engine_semaphore;
134
135 VkSemaphore semaphore;
136 VkFence fence;
137
138 VkBuffer vertex_buffer;
139 VkDeviceMemory vertex_buffer_mem;
140 VkDeviceSize vertex_buffer_size;
141
142 VkBuffer index_buffer;
143 VkDeviceMemory index_buffer_mem;
144 VkDeviceSize index_buffer_size;
145 };
146
147 /* Mapped from VkSwapchainKHR */
148 struct swapchain_data {
149 struct device_data *device;
150
151 VkSwapchainKHR swapchain;
152 unsigned width, height;
153 VkFormat format;
154
155 uint32_t n_images;
156 VkImage *images;
157 VkImageView *image_views;
158 VkFramebuffer *framebuffers;
159
160 VkRenderPass render_pass;
161
162 VkDescriptorPool descriptor_pool;
163 VkDescriptorSetLayout descriptor_layout;
164 VkDescriptorSet descriptor_set;
165
166 VkSampler font_sampler;
167
168 VkPipelineLayout pipeline_layout;
169 VkPipeline pipeline;
170
171 VkCommandPool command_pool;
172
173 struct list_head draws; /* List of struct overlay_draw */
174
175 bool font_uploaded;
176 VkImage font_image;
177 VkImageView font_image_view;
178 VkDeviceMemory font_mem;
179 VkBuffer upload_font_buffer;
180 VkDeviceMemory upload_font_buffer_mem;
181
182 /**/
183 ImGuiContext* imgui_context;
184 ImVec2 window_size;
185
186 /**/
187 uint64_t n_frames;
188 uint64_t last_present_time;
189
190 unsigned n_frames_since_update;
191 uint64_t last_fps_update;
192 double fps;
193
194 enum overlay_param_enabled stat_selector;
195 double time_dividor;
196 struct frame_stat stats_min, stats_max;
197 struct frame_stat frames_stats[200];
198
199 /* Over a single frame */
200 struct frame_stat frame_stats;
201
202 /* Over fps_sampling_period */
203 struct frame_stat accumulated_stats;
204 };
205
206 static const VkQueryPipelineStatisticFlags overlay_query_flags =
207 VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT |
208 VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT |
209 VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT |
210 VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT |
211 VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT |
212 VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT |
213 VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT |
214 VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT |
215 VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT |
216 VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT |
217 VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT;
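/* Keep in sync: OVERLAY_QUERY_COUNT below must equal the number of pipeline
 * statistics bits set in overlay_query_flags above (11). */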
218 #define OVERLAY_QUERY_COUNT (11)
219
220 static struct hash_table_u64 *vk_object_to_data = NULL;
221 static simple_mtx_t vk_object_to_data_mutex = SIMPLE_MTX_INITIALIZER;
222
223 thread_local ImGuiContext* __MesaImGui;
224
225 static inline void ensure_vk_object_map(void)
226 {
227 if (!vk_object_to_data)
228 vk_object_to_data = _mesa_hash_table_u64_create(NULL);
229 }
230
231 #define HKEY(obj) ((uint64_t)(obj))
232 #define FIND(type, obj) ((type *)find_object_data(HKEY(obj)))
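/* Illustrative usage of the helpers above: layer-private data is keyed off
 * the dispatchable handle, e.g.
 *
 *    map_object(HKEY(instance), instance_data);
 *    struct instance_data *instance_data = FIND(struct instance_data, instance);
 */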
233
234 static void *find_object_data(uint64_t obj)
235 {
236 simple_mtx_lock(&vk_object_to_data_mutex);
237 ensure_vk_object_map();
238 void *data = _mesa_hash_table_u64_search(vk_object_to_data, obj);
239 simple_mtx_unlock(&vk_object_to_data_mutex);
240 return data;
241 }
242
243 static void map_object(uint64_t obj, void *data)
244 {
245 simple_mtx_lock(&vk_object_to_data_mutex);
246 ensure_vk_object_map();
247 _mesa_hash_table_u64_insert(vk_object_to_data, obj, data);
248 simple_mtx_unlock(&vk_object_to_data_mutex);
249 }
250
251 static void unmap_object(uint64_t obj)
252 {
253 simple_mtx_lock(&vk_object_to_data_mutex);
254 _mesa_hash_table_u64_remove(vk_object_to_data, obj);
255 simple_mtx_unlock(&vk_object_to_data_mutex);
256 }
257
258 /**/
259
260 #define VK_CHECK(expr) \
261 do { \
262 VkResult __result = (expr); \
263 if (__result != VK_SUCCESS) { \
264 fprintf(stderr, "'%s' line %i failed with %s\n", \
265 #expr, __LINE__, vk_Result_to_str(__result)); \
266 } \
267 } while (0)
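/* Illustrative usage: wrap Vulkan calls whose failure should be logged but is
 * not fatal to the layer, e.g.
 *
 *    VK_CHECK(device_data->vtable.ResetFences(device_data->device, 1, &fence));
 */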
268
269 /**/
270
271 static VkLayerInstanceCreateInfo *get_instance_chain_info(const VkInstanceCreateInfo *pCreateInfo,
272 VkLayerFunction func)
273 {
274 vk_foreach_struct_const(item, pCreateInfo->pNext) {
275 if (item->sType == VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO &&
276 ((VkLayerInstanceCreateInfo *) item)->function == func)
277 return (VkLayerInstanceCreateInfo *) item;
278 }
279 unreachable("instance chain info not found");
280 return NULL;
281 }
282
283 static VkLayerDeviceCreateInfo *get_device_chain_info(const VkDeviceCreateInfo *pCreateInfo,
284 VkLayerFunction func)
285 {
286 vk_foreach_struct_const(item, pCreateInfo->pNext) {
287 if (item->sType == VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO &&
288 ((VkLayerDeviceCreateInfo *) item)->function == func)
289 return (VkLayerDeviceCreateInfo *)item;
290 }
291 unreachable("device chain info not found");
292 return NULL;
293 }
294
295 static void
296 free_chain(struct VkBaseOutStructure *chain)
297 {
298 while (chain) {
299 void *node = chain;
300 chain = chain->pNext;
301 free(node);
302 }
303 }
304
305 static struct VkBaseOutStructure *
306 clone_chain(const struct VkBaseInStructure *chain)
307 {
308 struct VkBaseOutStructure *head = NULL, *tail = NULL;
309
310 vk_foreach_struct_const(item, chain) {
311 size_t item_size = vk_structure_type_size(item);
312 if (item_size == 0) {
313 free_chain(head);
314 return NULL;
315 }
316
317 struct VkBaseOutStructure *new_item =
318 (struct VkBaseOutStructure *)malloc(item_size);
319
320 memcpy(new_item, item, item_size);
321
322 if (!head)
323 head = new_item;
324 if (tail)
325 tail->pNext = new_item;
326 tail = new_item;
327 }
328
329 return head;
330 }
331
332 /**/
333
334 static struct instance_data *new_instance_data(VkInstance instance)
335 {
336 struct instance_data *data = rzalloc(NULL, struct instance_data);
337 data->instance = instance;
338 data->control_client = -1;
339 map_object(HKEY(data->instance), data);
340 return data;
341 }
342
343 static void destroy_instance_data(struct instance_data *data)
344 {
345 if (data->params.output_file)
346 fclose(data->params.output_file);
347 if (data->params.control >= 0)
348 os_socket_close(data->params.control);
349 unmap_object(HKEY(data->instance));
350 ralloc_free(data);
351 }
352
353 static void instance_data_map_physical_devices(struct instance_data *instance_data,
354 bool map)
355 {
356 uint32_t physicalDeviceCount = 0;
357 instance_data->vtable.EnumeratePhysicalDevices(instance_data->instance,
358 &physicalDeviceCount,
359 NULL);
360
361 VkPhysicalDevice *physicalDevices = (VkPhysicalDevice *) malloc(sizeof(VkPhysicalDevice) * physicalDeviceCount);
362 instance_data->vtable.EnumeratePhysicalDevices(instance_data->instance,
363 &physicalDeviceCount,
364 physicalDevices);
365
366 for (uint32_t i = 0; i < physicalDeviceCount; i++) {
367 if (map)
368 map_object(HKEY(physicalDevices[i]), instance_data);
369 else
370 unmap_object(HKEY(physicalDevices[i]));
371 }
372
373 free(physicalDevices);
374 }
375
376 /**/
377 static struct device_data *new_device_data(VkDevice device, struct instance_data *instance)
378 {
379 struct device_data *data = rzalloc(NULL, struct device_data);
380 data->instance = instance;
381 data->device = device;
382 map_object(HKEY(data->device), data);
383 return data;
384 }
385
386 static struct queue_data *new_queue_data(VkQueue queue,
387 const VkQueueFamilyProperties *family_props,
388 uint32_t family_index,
389 struct device_data *device_data)
390 {
391 struct queue_data *data = rzalloc(device_data, struct queue_data);
392 data->device = device_data;
393 data->queue = queue;
394 data->flags = family_props->queueFlags;
395 data->timestamp_mask = (1ull << family_props->timestampValidBits) - 1;
396 data->family_index = family_index;
397 list_inithead(&data->running_command_buffer);
398 map_object(HKEY(data->queue), data);
399
400 /* Fence synchronizing access to queries on that queue. */
401 VkFenceCreateInfo fence_info = {};
402 fence_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
403 fence_info.flags = VK_FENCE_CREATE_SIGNALED_BIT;
404 VK_CHECK(device_data->vtable.CreateFence(device_data->device,
405 &fence_info,
406 NULL,
407 &data->queries_fence));
408
409 if (data->flags & VK_QUEUE_GRAPHICS_BIT)
410 device_data->graphic_queue = data;
411
412 return data;
413 }
414
415 static void destroy_queue(struct queue_data *data)
416 {
417 struct device_data *device_data = data->device;
418 device_data->vtable.DestroyFence(device_data->device, data->queries_fence, NULL);
419 unmap_object(HKEY(data->queue));
420 ralloc_free(data);
421 }
422
423 static void device_map_queues(struct device_data *data,
424 const VkDeviceCreateInfo *pCreateInfo)
425 {
426 for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++)
427 data->n_queues += pCreateInfo->pQueueCreateInfos[i].queueCount;
428 data->queues = ralloc_array(data, struct queue_data *, data->n_queues);
429
430 struct instance_data *instance_data = data->instance;
431 uint32_t n_family_props;
432 instance_data->pd_vtable.GetPhysicalDeviceQueueFamilyProperties(data->physical_device,
433 &n_family_props,
434 NULL);
435 VkQueueFamilyProperties *family_props =
436 (VkQueueFamilyProperties *)malloc(sizeof(VkQueueFamilyProperties) * n_family_props);
437 instance_data->pd_vtable.GetPhysicalDeviceQueueFamilyProperties(data->physical_device,
438 &n_family_props,
439 family_props);
440
441 uint32_t queue_index = 0;
442 for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
443 for (uint32_t j = 0; j < pCreateInfo->pQueueCreateInfos[i].queueCount; j++) {
444 VkQueue queue;
445 data->vtable.GetDeviceQueue(data->device,
446 pCreateInfo->pQueueCreateInfos[i].queueFamilyIndex,
447 j, &queue);
448
449 VK_CHECK(data->set_device_loader_data(data->device, queue));
450
451 data->queues[queue_index++] =
452 new_queue_data(queue, &family_props[pCreateInfo->pQueueCreateInfos[i].queueFamilyIndex],
453 pCreateInfo->pQueueCreateInfos[i].queueFamilyIndex, data);
454 }
455 }
456
457 free(family_props);
458 }
459
460 static void device_unmap_queues(struct device_data *data)
461 {
462 for (uint32_t i = 0; i < data->n_queues; i++)
463 destroy_queue(data->queues[i]);
464 }
465
466 static void destroy_device_data(struct device_data *data)
467 {
468 unmap_object(HKEY(data->device));
469 ralloc_free(data);
470 }
471
472 /**/
473 static struct command_buffer_data *new_command_buffer_data(VkCommandBuffer cmd_buffer,
474 VkCommandBufferLevel level,
475 VkQueryPool pipeline_query_pool,
476 VkQueryPool timestamp_query_pool,
477 uint32_t query_index,
478 struct device_data *device_data)
479 {
480 struct command_buffer_data *data = rzalloc(NULL, struct command_buffer_data);
481 data->device = device_data;
482 data->cmd_buffer = cmd_buffer;
483 data->level = level;
484 data->pipeline_query_pool = pipeline_query_pool;
485 data->timestamp_query_pool = timestamp_query_pool;
486 data->query_index = query_index;
487 list_inithead(&data->link);
488 map_object(HKEY(data->cmd_buffer), data);
489 return data;
490 }
491
492 static void destroy_command_buffer_data(struct command_buffer_data *data)
493 {
494 unmap_object(HKEY(data->cmd_buffer));
495 list_delinit(&data->link);
496 ralloc_free(data);
497 }
498
499 /**/
500 static struct swapchain_data *new_swapchain_data(VkSwapchainKHR swapchain,
501 struct device_data *device_data)
502 {
503 struct instance_data *instance_data = device_data->instance;
504 struct swapchain_data *data = rzalloc(NULL, struct swapchain_data);
505 data->device = device_data;
506 data->swapchain = swapchain;
507 data->window_size = ImVec2(instance_data->params.width, instance_data->params.height);
508 list_inithead(&data->draws);
509 map_object(HKEY(data->swapchain), data);
510 return data;
511 }
512
513 static void destroy_swapchain_data(struct swapchain_data *data)
514 {
515 unmap_object(HKEY(data->swapchain));
516 ralloc_free(data);
517 }
518
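/* Returns an overlay_draw for this swapchain: if the oldest draw in the list
 * has finished (its fence is signaled) it is reset and recycled, otherwise a
 * new one is allocated; either way it ends up at the tail of data->draws. */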
519 struct overlay_draw *get_overlay_draw(struct swapchain_data *data)
520 {
521 struct device_data *device_data = data->device;
522 struct overlay_draw *draw = list_is_empty(&data->draws) ?
523 NULL : list_first_entry(&data->draws, struct overlay_draw, link);
524
525 VkSemaphoreCreateInfo sem_info = {};
526 sem_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
527
528 if (draw && device_data->vtable.GetFenceStatus(device_data->device, draw->fence) == VK_SUCCESS) {
529 list_del(&draw->link);
530 VK_CHECK(device_data->vtable.ResetFences(device_data->device,
531 1, &draw->fence));
532 list_addtail(&draw->link, &data->draws);
533 return draw;
534 }
535
536 draw = rzalloc(data, struct overlay_draw);
537
538 VkCommandBufferAllocateInfo cmd_buffer_info = {};
539 cmd_buffer_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
540 cmd_buffer_info.commandPool = data->command_pool;
541 cmd_buffer_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
542 cmd_buffer_info.commandBufferCount = 1;
543 VK_CHECK(device_data->vtable.AllocateCommandBuffers(device_data->device,
544 &cmd_buffer_info,
545 &draw->command_buffer));
546 VK_CHECK(device_data->set_device_loader_data(device_data->device,
547 draw->command_buffer));
548
549
550 VkFenceCreateInfo fence_info = {};
551 fence_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
552 VK_CHECK(device_data->vtable.CreateFence(device_data->device,
553 &fence_info,
554 NULL,
555 &draw->fence));
556
557 VK_CHECK(device_data->vtable.CreateSemaphore(device_data->device, &sem_info,
558 NULL, &draw->semaphore));
559 VK_CHECK(device_data->vtable.CreateSemaphore(device_data->device, &sem_info,
560 NULL, &draw->cross_engine_semaphore));
561
562 list_addtail(&draw->link, &data->draws);
563
564 return draw;
565 }
566
567 static const char *param_unit(enum overlay_param_enabled param)
568 {
569 switch (param) {
570 case OVERLAY_PARAM_ENABLED_frame_timing:
571 case OVERLAY_PARAM_ENABLED_acquire_timing:
572 case OVERLAY_PARAM_ENABLED_present_timing:
573 return "(us)";
574 case OVERLAY_PARAM_ENABLED_gpu_timing:
575 return "(ns)";
576 default:
577 return "";
578 }
579 }
580
581 static void parse_command(struct instance_data *instance_data,
582 const char *cmd, unsigned cmdlen,
583 const char *param, unsigned paramlen)
584 {
585 if (!strncmp(cmd, "capture", cmdlen)) {
586 int value = atoi(param);
587 bool enabled = value > 0;
588
589 if (enabled) {
590 instance_data->capture_enabled = true;
591 } else {
592 instance_data->capture_enabled = false;
593 instance_data->capture_started = false;
594 }
595 }
596 }
597
598 #define BUFSIZE 4096
599
600 /**
601 * This function processes commands received on the control socket.
602 *
603 * A command starts with a colon, followed by the command name, optionally
604 * followed by '=' and a parameter, and ends with a semicolon. A full
605 * command + parameter looks like:
606 *
607 * :cmd=param;
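 *
 * For example, a client can enable frame capture by sending:
 *
 *    :capture=1;
 *
 * and disable it again with ":capture=0;" (see parse_command()).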
608 */
609 static void process_char(struct instance_data *instance_data, char c)
610 {
611 static char cmd[BUFSIZE];
612 static char param[BUFSIZE];
613
614 static unsigned cmdpos = 0;
615 static unsigned parampos = 0;
616 static bool reading_cmd = false;
617 static bool reading_param = false;
618
619 switch (c) {
620 case ':':
621 cmdpos = 0;
622 parampos = 0;
623 reading_cmd = true;
624 reading_param = false;
625 break;
626 case ';':
627 if (!reading_cmd)
628 break;
629 cmd[cmdpos++] = '\0';
630 param[parampos++] = '\0';
631 parse_command(instance_data, cmd, cmdpos, param, parampos);
632 reading_cmd = false;
633 reading_param = false;
634 break;
635 case '=':
636 if (!reading_cmd)
637 break;
638 reading_param = true;
639 break;
640 default:
641 if (!reading_cmd)
642 break;
643
644 if (reading_param) {
645 /* overflow means an invalid parameter */
646 if (parampos >= BUFSIZE - 1) {
647 reading_cmd = false;
648 reading_param = false;
649 break;
650 }
651
652 param[parampos++] = c;
653 } else {
654 /* overflow means an invalid command */
655 if (cmdpos >= BUFSIZE - 1) {
656 reading_cmd = false;
657 break;
658 }
659
660 cmd[cmdpos++] = c;
661 }
662 }
663 }
664
665 static void control_send(struct instance_data *instance_data,
666 const char *cmd, unsigned cmdlen,
667 const char *param, unsigned paramlen)
668 {
669 unsigned msglen = 0;
670 char buffer[BUFSIZE];
671
672 assert(cmdlen + paramlen + 3 < BUFSIZE);
673
674 buffer[msglen++] = ':';
675
676 memcpy(&buffer[msglen], cmd, cmdlen);
677 msglen += cmdlen;
678
679 if (paramlen > 0) {
680 buffer[msglen++] = '=';
681 memcpy(&buffer[msglen], param, paramlen);
682 msglen += paramlen;
683 buffer[msglen++] = ';';
684 }
685
686 os_socket_send(instance_data->control_client, buffer, msglen, 0);
687 }
688
689 static void control_send_connection_string(struct device_data *device_data)
690 {
691 struct instance_data *instance_data = device_data->instance;
692
693 const char *controlVersionCmd = "MesaOverlayControlVersion";
694 const char *controlVersionString = "1";
695
696 control_send(instance_data, controlVersionCmd, strlen(controlVersionCmd),
697 controlVersionString, strlen(controlVersionString));
698
699 const char *deviceCmd = "DeviceName";
700 const char *deviceName = device_data->properties.deviceName;
701
702 control_send(instance_data, deviceCmd, strlen(deviceCmd),
703 deviceName, strlen(deviceName));
704
705 const char *mesaVersionCmd = "MesaVersion";
706 const char *mesaVersionString = "Mesa " PACKAGE_VERSION MESA_GIT_SHA1;
707
708 control_send(instance_data, mesaVersionCmd, strlen(mesaVersionCmd),
709 mesaVersionString, strlen(mesaVersionString));
710 }
711
712 static void control_client_check(struct device_data *device_data)
713 {
714 struct instance_data *instance_data = device_data->instance;
715
716 /* Already connected, just return. */
717 if (instance_data->control_client >= 0)
718 return;
719
720 int socket = os_socket_accept(instance_data->params.control);
721 if (socket == -1) {
722 if (errno != EAGAIN && errno != EWOULDBLOCK && errno != ECONNABORTED)
723 fprintf(stderr, "ERROR on socket: %s\n", strerror(errno));
724 return;
725 }
726
727 if (socket >= 0) {
728 os_socket_block(socket, false);
729 instance_data->control_client = socket;
730 control_send_connection_string(device_data);
731 }
732 }
733
734 static void control_client_disconnected(struct instance_data *instance_data)
735 {
736 os_socket_close(instance_data->control_client);
737 instance_data->control_client = -1;
738 }
739
740 static void process_control_socket(struct instance_data *instance_data)
741 {
742 const int client = instance_data->control_client;
743 if (client >= 0) {
744 char buf[BUFSIZE];
745
746 while (true) {
747 ssize_t n = os_socket_recv(client, buf, BUFSIZE, 0);
748
749 if (n == -1) {
750 if (errno == EAGAIN || errno == EWOULDBLOCK) {
751 /* nothing to read, try again later */
752 break;
753 }
754
755 if (errno != ECONNRESET)
756 fprintf(stderr, "ERROR on connection: %s\n", strerror(errno));
757
758 control_client_disconnected(instance_data);
759 } else if (n == 0) {
760 /* recv() returns 0 when the client disconnects */
761 control_client_disconnected(instance_data);
762 }
763
764 for (ssize_t i = 0; i < n; i++) {
765 process_char(instance_data, buf[i]);
766 }
767
768 /* If we try to read BUFSIZE and receive BUFSIZE bytes from the
769 * socket, there's a good chance that there's still more data to be
770 * read, so we will try again. Otherwise, simply be done for this
771 * iteration and try again on the next frame.
772 */
773 if (n < BUFSIZE)
774 break;
775 }
776 }
777 }
778
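/* Called once per present: polls the control socket, folds the per-frame
 * device and swapchain stats into the ring buffer, recomputes the FPS
 * estimate every fps_sampling_period, and, when capture is active, writes a
 * line of comma-separated values to output_file. */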
779 static void snapshot_swapchain_frame(struct swapchain_data *data)
780 {
781 struct device_data *device_data = data->device;
782 struct instance_data *instance_data = device_data->instance;
783 uint32_t f_idx = data->n_frames % ARRAY_SIZE(data->frames_stats);
784 uint64_t now = os_time_get(); /* us */
785
786 if (instance_data->params.control >= 0) {
787 control_client_check(device_data);
788 process_control_socket(instance_data);
789 }
790
791 if (data->last_present_time) {
792 data->frame_stats.stats[OVERLAY_PARAM_ENABLED_frame_timing] =
793 now - data->last_present_time;
794 }
795
796 memset(&data->frames_stats[f_idx], 0, sizeof(data->frames_stats[f_idx]));
797 for (int s = 0; s < OVERLAY_PARAM_ENABLED_MAX; s++) {
798 data->frames_stats[f_idx].stats[s] += device_data->frame_stats.stats[s] + data->frame_stats.stats[s];
799 data->accumulated_stats.stats[s] += device_data->frame_stats.stats[s] + data->frame_stats.stats[s];
800 }
801
802 /* If capture has been enabled but it hasn't started yet, it means we are on
803 * the first snapshot after it has been enabled. At this point we want to
804 * use the stats captured so far to update the display, but we don't want
805 * this data to skew the stats that we want to capture from now
806 * on.
807 *
808 * capture_begin == true will trigger an update of the fps on display, and a
809 * flush of the data, but no stats will be written to the output file. This
810 * way, we will have only stats from after the capture has been enabled
811 * written to the output_file.
812 */
813 const bool capture_begin =
814 instance_data->capture_enabled && !instance_data->capture_started;
815
816 if (data->last_fps_update) {
817 double elapsed = (double)(now - data->last_fps_update); /* us */
818 if (capture_begin ||
819 elapsed >= instance_data->params.fps_sampling_period) {
820 data->fps = 1000000.0f * data->n_frames_since_update / elapsed;
821 if (instance_data->capture_started) {
822 if (!instance_data->first_line_printed) {
823 bool first_column = true;
824
825 instance_data->first_line_printed = true;
826
827 #define OVERLAY_PARAM_BOOL(name) \
828 if (instance_data->params.enabled[OVERLAY_PARAM_ENABLED_##name]) { \
829 fprintf(instance_data->params.output_file, \
830 "%s%s%s", first_column ? "" : ", ", #name, \
831 param_unit(OVERLAY_PARAM_ENABLED_##name)); \
832 first_column = false; \
833 }
834 #define OVERLAY_PARAM_CUSTOM(name)
835 OVERLAY_PARAMS
836 #undef OVERLAY_PARAM_BOOL
837 #undef OVERLAY_PARAM_CUSTOM
838 fprintf(instance_data->params.output_file, "\n");
839 }
840
841 for (int s = 0; s < OVERLAY_PARAM_ENABLED_MAX; s++) {
842 if (!instance_data->params.enabled[s])
843 continue;
844 if (s == OVERLAY_PARAM_ENABLED_fps) {
845 fprintf(instance_data->params.output_file,
846 "%s%.2f", s == 0 ? "" : ", ", data->fps);
847 } else {
848 fprintf(instance_data->params.output_file,
849 "%s%" PRIu64, s == 0 ? "" : ", ",
850 data->accumulated_stats.stats[s]);
851 }
852 }
853 fprintf(instance_data->params.output_file, "\n");
854 fflush(instance_data->params.output_file);
855 }
856
857 memset(&data->accumulated_stats, 0, sizeof(data->accumulated_stats));
858 data->n_frames_since_update = 0;
859 data->last_fps_update = now;
860
861 if (capture_begin)
862 instance_data->capture_started = true;
863 }
864 } else {
865 data->last_fps_update = now;
866 }
867
868 memset(&device_data->frame_stats, 0, sizeof(device_data->frame_stats));
869 memset(&data->frame_stats, 0, sizeof(data->frame_stats));
870
871 data->last_present_time = now;
872 data->n_frames++;
873 data->n_frames_since_update++;
874 }
875
876 static float get_time_stat(void *_data, int _idx)
877 {
878 struct swapchain_data *data = (struct swapchain_data *) _data;
879 if ((ARRAY_SIZE(data->frames_stats) - _idx) > data->n_frames)
880 return 0.0f;
881 int idx = ARRAY_SIZE(data->frames_stats) +
882 data->n_frames < ARRAY_SIZE(data->frames_stats) ?
883 _idx - data->n_frames :
884 _idx + data->n_frames;
885 idx %= ARRAY_SIZE(data->frames_stats);
886 /* Time stats are in us. */
887 return data->frames_stats[idx].stats[data->stat_selector] / data->time_dividor;
888 }
889
890 static float get_stat(void *_data, int _idx)
891 {
892 struct swapchain_data *data = (struct swapchain_data *) _data;
893 if ((ARRAY_SIZE(data->frames_stats) - _idx) > data->n_frames)
894 return 0.0f;
895 int idx = ARRAY_SIZE(data->frames_stats) +
896 data->n_frames < ARRAY_SIZE(data->frames_stats) ?
897 _idx - data->n_frames :
898 _idx + data->n_frames;
899 idx %= ARRAY_SIZE(data->frames_stats);
900 return data->frames_stats[idx].stats[data->stat_selector];
901 }
902
903 static void position_layer(struct swapchain_data *data)
904
905 {
906 struct device_data *device_data = data->device;
907 struct instance_data *instance_data = device_data->instance;
908 const float margin = 10.0f;
909
910 ImGui::SetNextWindowBgAlpha(0.5);
911 ImGui::SetNextWindowSize(data->window_size, ImGuiCond_Always);
912 switch (instance_data->params.position) {
913 case LAYER_POSITION_TOP_LEFT:
914 ImGui::SetNextWindowPos(ImVec2(margin, margin), ImGuiCond_Always);
915 break;
916 case LAYER_POSITION_TOP_RIGHT:
917 ImGui::SetNextWindowPos(ImVec2(data->width - data->window_size.x - margin, margin),
918 ImGuiCond_Always);
919 break;
920 case LAYER_POSITION_BOTTOM_LEFT:
921 ImGui::SetNextWindowPos(ImVec2(margin, data->height - data->window_size.y - margin),
922 ImGuiCond_Always);
923 break;
924 case LAYER_POSITION_BOTTOM_RIGHT:
925 ImGui::SetNextWindowPos(ImVec2(data->width - data->window_size.x - margin,
926 data->height - data->window_size.y - margin),
927 ImGuiCond_Always);
928 break;
929 }
930 }
931
932 static void compute_swapchain_display(struct swapchain_data *data)
933 {
934 struct device_data *device_data = data->device;
935 struct instance_data *instance_data = device_data->instance;
936
937 ImGui::SetCurrentContext(data->imgui_context);
938 ImGui::NewFrame();
939 position_layer(data);
940 ImGui::Begin("Mesa overlay");
941 if (instance_data->params.enabled[OVERLAY_PARAM_ENABLED_device])
942 ImGui::Text("Device: %s", device_data->properties.deviceName);
943
944 if (instance_data->params.enabled[OVERLAY_PARAM_ENABLED_format]) {
945 const char *format_name = vk_Format_to_str(data->format);
946 format_name = format_name ? (format_name + strlen("VK_FORMAT_")) : "unknown";
947 ImGui::Text("Swapchain format: %s", format_name);
948 }
949 if (instance_data->params.enabled[OVERLAY_PARAM_ENABLED_frame])
950 ImGui::Text("Frames: %" PRIu64, data->n_frames);
951 if (instance_data->params.enabled[OVERLAY_PARAM_ENABLED_fps])
952 ImGui::Text("FPS: %.2f" , data->fps);
953
954 /* Recompute min/max */
955 for (uint32_t s = 0; s < OVERLAY_PARAM_ENABLED_MAX; s++) {
956 data->stats_min.stats[s] = UINT64_MAX;
957 data->stats_max.stats[s] = 0;
958 }
959 for (uint32_t f = 0; f < MIN2(data->n_frames, ARRAY_SIZE(data->frames_stats)); f++) {
960 for (uint32_t s = 0; s < OVERLAY_PARAM_ENABLED_MAX; s++) {
961 data->stats_min.stats[s] = MIN2(data->frames_stats[f].stats[s],
962 data->stats_min.stats[s]);
963 data->stats_max.stats[s] = MAX2(data->frames_stats[f].stats[s],
964 data->stats_max.stats[s]);
965 }
966 }
967 for (uint32_t s = 0; s < OVERLAY_PARAM_ENABLED_MAX; s++) {
968 assert(data->stats_min.stats[s] != UINT64_MAX);
969 }
970
971 for (uint32_t s = 0; s < OVERLAY_PARAM_ENABLED_MAX; s++) {
972 if (!instance_data->params.enabled[s] ||
973 s == OVERLAY_PARAM_ENABLED_device ||
974 s == OVERLAY_PARAM_ENABLED_format ||
975 s == OVERLAY_PARAM_ENABLED_fps ||
976 s == OVERLAY_PARAM_ENABLED_frame)
977 continue;
978
979 char hash[40];
980 snprintf(hash, sizeof(hash), "##%s", overlay_param_names[s]);
981 data->stat_selector = (enum overlay_param_enabled) s;
982 data->time_dividor = 1000.0f;
983 if (s == OVERLAY_PARAM_ENABLED_gpu_timing)
984 data->time_dividor = 1000000.0f;
985
986 if (s == OVERLAY_PARAM_ENABLED_frame_timing ||
987 s == OVERLAY_PARAM_ENABLED_acquire_timing ||
988 s == OVERLAY_PARAM_ENABLED_present_timing ||
989 s == OVERLAY_PARAM_ENABLED_gpu_timing) {
990 double min_time = data->stats_min.stats[s] / data->time_dividor;
991 double max_time = data->stats_max.stats[s] / data->time_dividor;
992 ImGui::PlotHistogram(hash, get_time_stat, data,
993 ARRAY_SIZE(data->frames_stats), 0,
994 NULL, min_time, max_time,
995 ImVec2(ImGui::GetContentRegionAvailWidth(), 30));
996 ImGui::Text("%s: %.3fms [%.3f, %.3f]", overlay_param_names[s],
997 get_time_stat(data, ARRAY_SIZE(data->frames_stats) - 1),
998 min_time, max_time);
999 } else {
1000 ImGui::PlotHistogram(hash, get_stat, data,
1001 ARRAY_SIZE(data->frames_stats), 0,
1002 NULL,
1003 data->stats_min.stats[s],
1004 data->stats_max.stats[s],
1005 ImVec2(ImGui::GetContentRegionAvailWidth(), 30));
1006 ImGui::Text("%s: %.0f [%" PRIu64 ", %" PRIu64 "]", overlay_param_names[s],
1007 get_stat(data, ARRAY_SIZE(data->frames_stats) - 1),
1008 data->stats_min.stats[s], data->stats_max.stats[s]);
1009 }
1010 }
1011 data->window_size = ImVec2(data->window_size.x, ImGui::GetCursorPosY() + 10.0f);
1012 ImGui::End();
1013 ImGui::EndFrame();
1014 ImGui::Render();
1015 }
1016
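/* Returns the index of the first memory type that has all the requested
 * property flags and is allowed by type_bits (typically
 * VkMemoryRequirements::memoryTypeBits), or 0xFFFFFFFF if none matches. */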
1017 static uint32_t vk_memory_type(struct device_data *data,
1018 VkMemoryPropertyFlags properties,
1019 uint32_t type_bits)
1020 {
1021 VkPhysicalDeviceMemoryProperties prop;
1022 data->instance->pd_vtable.GetPhysicalDeviceMemoryProperties(data->physical_device, &prop);
1023 for (uint32_t i = 0; i < prop.memoryTypeCount; i++)
1024 if ((prop.memoryTypes[i].propertyFlags & properties) == properties && type_bits & (1<<i))
1025 return i;
1026 return 0xFFFFFFFF; // Unable to find memoryType
1027 }
1028
1029 static void ensure_swapchain_fonts(struct swapchain_data *data,
1030 VkCommandBuffer command_buffer)
1031 {
1032 if (data->font_uploaded)
1033 return;
1034
1035 data->font_uploaded = true;
1036
1037 struct device_data *device_data = data->device;
1038 ImGuiIO& io = ImGui::GetIO();
1039 unsigned char* pixels;
1040 int width, height;
1041 io.Fonts->GetTexDataAsRGBA32(&pixels, &width, &height);
1042 size_t upload_size = width * height * 4 * sizeof(char);
1043
1044 /* Upload buffer */
1045 VkBufferCreateInfo buffer_info = {};
1046 buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
1047 buffer_info.size = upload_size;
1048 buffer_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
1049 buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
1050 VK_CHECK(device_data->vtable.CreateBuffer(device_data->device, &buffer_info,
1051 NULL, &data->upload_font_buffer));
1052 VkMemoryRequirements upload_buffer_req;
1053 device_data->vtable.GetBufferMemoryRequirements(device_data->device,
1054 data->upload_font_buffer,
1055 &upload_buffer_req);
1056 VkMemoryAllocateInfo upload_alloc_info = {};
1057 upload_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
1058 upload_alloc_info.allocationSize = upload_buffer_req.size;
1059 upload_alloc_info.memoryTypeIndex = vk_memory_type(device_data,
1060 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
1061 upload_buffer_req.memoryTypeBits);
1062 VK_CHECK(device_data->vtable.AllocateMemory(device_data->device,
1063 &upload_alloc_info,
1064 NULL,
1065 &data->upload_font_buffer_mem));
1066 VK_CHECK(device_data->vtable.BindBufferMemory(device_data->device,
1067 data->upload_font_buffer,
1068 data->upload_font_buffer_mem, 0));
1069
1070 /* Upload to Buffer */
1071 char* map = NULL;
1072 VK_CHECK(device_data->vtable.MapMemory(device_data->device,
1073 data->upload_font_buffer_mem,
1074 0, upload_size, 0, (void**)(&map)));
1075 memcpy(map, pixels, upload_size);
1076 VkMappedMemoryRange range[1] = {};
1077 range[0].sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
1078 range[0].memory = data->upload_font_buffer_mem;
1079 range[0].size = upload_size;
1080 VK_CHECK(device_data->vtable.FlushMappedMemoryRanges(device_data->device, 1, range));
1081 device_data->vtable.UnmapMemory(device_data->device,
1082 data->upload_font_buffer_mem);
1083
1084 /* Copy buffer to image */
1085 VkImageMemoryBarrier copy_barrier[1] = {};
1086 copy_barrier[0].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
1087 copy_barrier[0].dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
1088 copy_barrier[0].oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
1089 copy_barrier[0].newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
1090 copy_barrier[0].srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
1091 copy_barrier[0].dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
1092 copy_barrier[0].image = data->font_image;
1093 copy_barrier[0].subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1094 copy_barrier[0].subresourceRange.levelCount = 1;
1095 copy_barrier[0].subresourceRange.layerCount = 1;
1096 device_data->vtable.CmdPipelineBarrier(command_buffer,
1097 VK_PIPELINE_STAGE_HOST_BIT,
1098 VK_PIPELINE_STAGE_TRANSFER_BIT,
1099 0, 0, NULL, 0, NULL,
1100 1, copy_barrier);
1101
1102 VkBufferImageCopy region = {};
1103 region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1104 region.imageSubresource.layerCount = 1;
1105 region.imageExtent.width = width;
1106 region.imageExtent.height = height;
1107 region.imageExtent.depth = 1;
1108 device_data->vtable.CmdCopyBufferToImage(command_buffer,
1109 data->upload_font_buffer,
1110 data->font_image,
1111 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1112 1, &region);
1113
1114 VkImageMemoryBarrier use_barrier[1] = {};
1115 use_barrier[0].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
1116 use_barrier[0].srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
1117 use_barrier[0].dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
1118 use_barrier[0].oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
1119 use_barrier[0].newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
1120 use_barrier[0].srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
1121 use_barrier[0].dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
1122 use_barrier[0].image = data->font_image;
1123 use_barrier[0].subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1124 use_barrier[0].subresourceRange.levelCount = 1;
1125 use_barrier[0].subresourceRange.layerCount = 1;
1126 device_data->vtable.CmdPipelineBarrier(command_buffer,
1127 VK_PIPELINE_STAGE_TRANSFER_BIT,
1128 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
1129 0,
1130 0, NULL,
1131 0, NULL,
1132 1, use_barrier);
1133
1134 /* Store our identifier */
1135 io.Fonts->TexID = (ImTextureID)(intptr_t)data->font_image;
1136 }
1137
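/* (Re)creates a host-visible buffer of at least new_size bytes with the given
 * usage, freeing any previous buffer/memory first; used below for the ImGui
 * vertex and index buffers. */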
1138 static void CreateOrResizeBuffer(struct device_data *data,
1139 VkBuffer *buffer,
1140 VkDeviceMemory *buffer_memory,
1141 VkDeviceSize *buffer_size,
1142 size_t new_size, VkBufferUsageFlagBits usage)
1143 {
1144 if (*buffer != VK_NULL_HANDLE)
1145 data->vtable.DestroyBuffer(data->device, *buffer, NULL);
1146 if (*buffer_memory)
1147 data->vtable.FreeMemory(data->device, *buffer_memory, NULL);
1148
1149 VkBufferCreateInfo buffer_info = {};
1150 buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
1151 buffer_info.size = new_size;
1152 buffer_info.usage = usage;
1153 buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
1154 VK_CHECK(data->vtable.CreateBuffer(data->device, &buffer_info, NULL, buffer));
1155
1156 VkMemoryRequirements req;
1157 data->vtable.GetBufferMemoryRequirements(data->device, *buffer, &req);
1158 VkMemoryAllocateInfo alloc_info = {};
1159 alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
1160 alloc_info.allocationSize = req.size;
1161 alloc_info.memoryTypeIndex =
1162 vk_memory_type(data, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, req.memoryTypeBits);
1163 VK_CHECK(data->vtable.AllocateMemory(data->device, &alloc_info, NULL, buffer_memory));
1164
1165 VK_CHECK(data->vtable.BindBufferMemory(data->device, *buffer, *buffer_memory, 0));
1166 *buffer_size = new_size;
1167 }
1168
1169 static struct overlay_draw *render_swapchain_display(struct swapchain_data *data,
1170 struct queue_data *present_queue,
1171 const VkSemaphore *wait_semaphores,
1172 unsigned n_wait_semaphores,
1173 unsigned image_index)
1174 {
1175 ImDrawData* draw_data = ImGui::GetDrawData();
1176 if (draw_data->TotalVtxCount == 0)
1177 return NULL;
1178
1179 struct device_data *device_data = data->device;
1180 struct overlay_draw *draw = get_overlay_draw(data);
1181
1182 device_data->vtable.ResetCommandBuffer(draw->command_buffer, 0);
1183
1184 VkRenderPassBeginInfo render_pass_info = {};
1185 render_pass_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
1186 render_pass_info.renderPass = data->render_pass;
1187 render_pass_info.framebuffer = data->framebuffers[image_index];
1188 render_pass_info.renderArea.extent.width = data->width;
1189 render_pass_info.renderArea.extent.height = data->height;
1190
1191 VkCommandBufferBeginInfo buffer_begin_info = {};
1192 buffer_begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
1193
1194 device_data->vtable.BeginCommandBuffer(draw->command_buffer, &buffer_begin_info);
1195
1196 ensure_swapchain_fonts(data, draw->command_buffer);
1197
1198 /* Bounce the image to display back to color attachment layout for
1199 * rendering on top of it.
1200 */
1201 VkImageMemoryBarrier imb;
1202 imb.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
1203 imb.pNext = nullptr;
1204 imb.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
1205 imb.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
1206 imb.oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
1207 imb.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
1208 imb.image = data->images[image_index];
1209 imb.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1210 imb.subresourceRange.baseMipLevel = 0;
1211 imb.subresourceRange.levelCount = 1;
1212 imb.subresourceRange.baseArrayLayer = 0;
1213 imb.subresourceRange.layerCount = 1;
1214 imb.srcQueueFamilyIndex = present_queue->family_index;
1215 imb.dstQueueFamilyIndex = device_data->graphic_queue->family_index;
1216 device_data->vtable.CmdPipelineBarrier(draw->command_buffer,
1217 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
1218 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
1219 0, /* dependency flags */
1220 0, nullptr, /* memory barriers */
1221 0, nullptr, /* buffer memory barriers */
1222 1, &imb); /* image memory barriers */
1223
1224 device_data->vtable.CmdBeginRenderPass(draw->command_buffer, &render_pass_info,
1225 VK_SUBPASS_CONTENTS_INLINE);
1226
1227 /* Create/Resize vertex & index buffers */
1228 size_t vertex_size = ALIGN(draw_data->TotalVtxCount * sizeof(ImDrawVert), device_data->properties.limits.nonCoherentAtomSize);
1229 size_t index_size = ALIGN(draw_data->TotalIdxCount * sizeof(ImDrawIdx), device_data->properties.limits.nonCoherentAtomSize);
1230 if (draw->vertex_buffer_size < vertex_size) {
1231 CreateOrResizeBuffer(device_data,
1232 &draw->vertex_buffer,
1233 &draw->vertex_buffer_mem,
1234 &draw->vertex_buffer_size,
1235 vertex_size, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
1236 }
1237 if (draw->index_buffer_size < index_size) {
1238 CreateOrResizeBuffer(device_data,
1239 &draw->index_buffer,
1240 &draw->index_buffer_mem,
1241 &draw->index_buffer_size,
1242 index_size, VK_BUFFER_USAGE_INDEX_BUFFER_BIT);
1243 }
1244
1245 /* Upload vertex & index data */
1246 ImDrawVert* vtx_dst = NULL;
1247 ImDrawIdx* idx_dst = NULL;
1248 VK_CHECK(device_data->vtable.MapMemory(device_data->device, draw->vertex_buffer_mem,
1249 0, vertex_size, 0, (void**)(&vtx_dst)));
1250 VK_CHECK(device_data->vtable.MapMemory(device_data->device, draw->index_buffer_mem,
1251 0, index_size, 0, (void**)(&idx_dst)));
1252 for (int n = 0; n < draw_data->CmdListsCount; n++)
1253 {
1254 const ImDrawList* cmd_list = draw_data->CmdLists[n];
1255 memcpy(vtx_dst, cmd_list->VtxBuffer.Data, cmd_list->VtxBuffer.Size * sizeof(ImDrawVert));
1256 memcpy(idx_dst, cmd_list->IdxBuffer.Data, cmd_list->IdxBuffer.Size * sizeof(ImDrawIdx));
1257 vtx_dst += cmd_list->VtxBuffer.Size;
1258 idx_dst += cmd_list->IdxBuffer.Size;
1259 }
1260 VkMappedMemoryRange range[2] = {};
1261 range[0].sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
1262 range[0].memory = draw->vertex_buffer_mem;
1263 range[0].size = VK_WHOLE_SIZE;
1264 range[1].sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
1265 range[1].memory = draw->index_buffer_mem;
1266 range[1].size = VK_WHOLE_SIZE;
1267 VK_CHECK(device_data->vtable.FlushMappedMemoryRanges(device_data->device, 2, range));
1268 device_data->vtable.UnmapMemory(device_data->device, draw->vertex_buffer_mem);
1269 device_data->vtable.UnmapMemory(device_data->device, draw->index_buffer_mem);
1270
1271 /* Bind pipeline and descriptor sets */
1272 device_data->vtable.CmdBindPipeline(draw->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, data->pipeline);
1273 VkDescriptorSet desc_set[1] = { data->descriptor_set };
1274 device_data->vtable.CmdBindDescriptorSets(draw->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
1275 data->pipeline_layout, 0, 1, desc_set, 0, NULL);
1276
1277 /* Bind vertex & index buffers */
1278 VkBuffer vertex_buffers[1] = { draw->vertex_buffer };
1279 VkDeviceSize vertex_offset[1] = { 0 };
1280 device_data->vtable.CmdBindVertexBuffers(draw->command_buffer, 0, 1, vertex_buffers, vertex_offset);
1281 device_data->vtable.CmdBindIndexBuffer(draw->command_buffer, draw->index_buffer, 0, VK_INDEX_TYPE_UINT16);
1282
1283 /* Setup viewport */
1284 VkViewport viewport;
1285 viewport.x = 0;
1286 viewport.y = 0;
1287 viewport.width = draw_data->DisplaySize.x;
1288 viewport.height = draw_data->DisplaySize.y;
1289 viewport.minDepth = 0.0f;
1290 viewport.maxDepth = 1.0f;
1291 device_data->vtable.CmdSetViewport(draw->command_buffer, 0, 1, &viewport);
1292
1293
1294 /* Setup scale and translation through push constants :
1295 *
1296 * Our visible imgui space lies from draw_data->DisplayPos (top left) to
1297 * draw_data->DisplayPos + draw_data->DisplaySize (bottom right). DisplayPos
1298 * is typically (0,0) for single viewport apps.
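 *
 * Mapping x in [DisplayPos.x, DisplayPos.x + DisplaySize.x] onto NDC [-1, 1]:
 *
 *    x_ndc = (x - DisplayPos.x) * 2 / DisplaySize.x - 1
 *          = x * scale[0] + (-1 - DisplayPos.x * scale[0])
 *
 * which is exactly the scale/translate pair pushed below (same for y).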
1299 */
1300 float scale[2];
1301 scale[0] = 2.0f / draw_data->DisplaySize.x;
1302 scale[1] = 2.0f / draw_data->DisplaySize.y;
1303 float translate[2];
1304 translate[0] = -1.0f - draw_data->DisplayPos.x * scale[0];
1305 translate[1] = -1.0f - draw_data->DisplayPos.y * scale[1];
1306 device_data->vtable.CmdPushConstants(draw->command_buffer, data->pipeline_layout,
1307 VK_SHADER_STAGE_VERTEX_BIT,
1308 sizeof(float) * 0, sizeof(float) * 2, scale);
1309 device_data->vtable.CmdPushConstants(draw->command_buffer, data->pipeline_layout,
1310 VK_SHADER_STAGE_VERTEX_BIT,
1311 sizeof(float) * 2, sizeof(float) * 2, translate);
1312
1313 // Render the command lists:
1314 int vtx_offset = 0;
1315 int idx_offset = 0;
1316 ImVec2 display_pos = draw_data->DisplayPos;
1317 for (int n = 0; n < draw_data->CmdListsCount; n++)
1318 {
1319 const ImDrawList* cmd_list = draw_data->CmdLists[n];
1320 for (int cmd_i = 0; cmd_i < cmd_list->CmdBuffer.Size; cmd_i++)
1321 {
1322 const ImDrawCmd* pcmd = &cmd_list->CmdBuffer[cmd_i];
1323 // Apply scissor/clipping rectangle
1324 // FIXME: We could clamp width/height based on clamped min/max values.
1325 VkRect2D scissor;
1326 scissor.offset.x = (int32_t)(pcmd->ClipRect.x - display_pos.x) > 0 ? (int32_t)(pcmd->ClipRect.x - display_pos.x) : 0;
1327 scissor.offset.y = (int32_t)(pcmd->ClipRect.y - display_pos.y) > 0 ? (int32_t)(pcmd->ClipRect.y - display_pos.y) : 0;
1328 scissor.extent.width = (uint32_t)(pcmd->ClipRect.z - pcmd->ClipRect.x);
1329 scissor.extent.height = (uint32_t)(pcmd->ClipRect.w - pcmd->ClipRect.y + 1); // FIXME: Why +1 here?
1330 device_data->vtable.CmdSetScissor(draw->command_buffer, 0, 1, &scissor);
1331
1332 // Draw
1333 device_data->vtable.CmdDrawIndexed(draw->command_buffer, pcmd->ElemCount, 1, idx_offset, vtx_offset, 0);
1334
1335 idx_offset += pcmd->ElemCount;
1336 }
1337 vtx_offset += cmd_list->VtxBuffer.Size;
1338 }
1339
1340 device_data->vtable.CmdEndRenderPass(draw->command_buffer);
1341
1342 if (device_data->graphic_queue->family_index != present_queue->family_index)
1343 {
1344 /* Transfer the image back to the present queue family
1345 * image layout was already changed to present by the render pass
1346 */
1347 imb.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
1348 imb.pNext = nullptr;
1349 imb.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
1350 imb.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
1351 imb.oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
1352 imb.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
1353 imb.image = data->images[image_index];
1354 imb.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1355 imb.subresourceRange.baseMipLevel = 0;
1356 imb.subresourceRange.levelCount = 1;
1357 imb.subresourceRange.baseArrayLayer = 0;
1358 imb.subresourceRange.layerCount = 1;
1359 imb.srcQueueFamilyIndex = device_data->graphic_queue->family_index;
1360 imb.dstQueueFamilyIndex = present_queue->family_index;
1361 device_data->vtable.CmdPipelineBarrier(draw->command_buffer,
1362 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
1363 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
1364 0, /* dependency flags */
1365 0, nullptr, /* memory barriers */
1366 0, nullptr, /* buffer memory barriers */
1367 1, &imb); /* image memory barriers */
1368 }
1369
1370 device_data->vtable.EndCommandBuffer(draw->command_buffer);
1371
1372 /* When presenting on a different queue than where we're drawing the
1373 * overlay *AND* when the application does not provide a semaphore to
1374 * vkQueuePresent, insert our own cross engine synchronization
1375 * semaphore.
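 *
 * The first submit below goes to the present queue and only signals
 * draw->cross_engine_semaphore; the second goes to the graphics queue, waits
 * on that semaphore and executes the overlay command buffer.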
1376 */
1377 if (n_wait_semaphores == 0 && device_data->graphic_queue->queue != present_queue->queue) {
1378 VkPipelineStageFlags stages_wait = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
1379 VkSubmitInfo submit_info = {};
1380 submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
1381 submit_info.commandBufferCount = 0;
1382 submit_info.pWaitDstStageMask = &stages_wait;
1383 submit_info.waitSemaphoreCount = 0;
1384 submit_info.signalSemaphoreCount = 1;
1385 submit_info.pSignalSemaphores = &draw->cross_engine_semaphore;
1386
1387 device_data->vtable.QueueSubmit(present_queue->queue, 1, &submit_info, VK_NULL_HANDLE);
1388
1389 submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
1390 submit_info.commandBufferCount = 1;
1391 submit_info.pWaitDstStageMask = &stages_wait;
1392 submit_info.pCommandBuffers = &draw->command_buffer;
1393 submit_info.waitSemaphoreCount = 1;
1394 submit_info.pWaitSemaphores = &draw->cross_engine_semaphore;
1395 submit_info.signalSemaphoreCount = 1;
1396 submit_info.pSignalSemaphores = &draw->semaphore;
1397
1398 device_data->vtable.QueueSubmit(device_data->graphic_queue->queue, 1, &submit_info, draw->fence);
1399 } else {
1400 VkPipelineStageFlags *stages_wait = (VkPipelineStageFlags*) malloc(sizeof(VkPipelineStageFlags) * n_wait_semaphores);
1401 for (unsigned i = 0; i < n_wait_semaphores; i++)
1402 {
1403 // wait in the fragment stage until the swapchain image is ready
1404 stages_wait[i] = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
1405 }
1406
1407 VkSubmitInfo submit_info = {};
1408 submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
1409 submit_info.commandBufferCount = 1;
1410 submit_info.pCommandBuffers = &draw->command_buffer;
1411 submit_info.pWaitDstStageMask = stages_wait;
1412 submit_info.waitSemaphoreCount = n_wait_semaphores;
1413 submit_info.pWaitSemaphores = wait_semaphores;
1414 submit_info.signalSemaphoreCount = 1;
1415 submit_info.pSignalSemaphores = &draw->semaphore;
1416
1417 device_data->vtable.QueueSubmit(device_data->graphic_queue->queue, 1, &submit_info, draw->fence);
1418
1419 free(stages_wait);
1420 }
1421
1422 return draw;
1423 }
1424
1425 static const uint32_t overlay_vert_spv[] = {
1426 #include "overlay.vert.spv.h"
1427 };
1428 static const uint32_t overlay_frag_spv[] = {
1429 #include "overlay.frag.spv.h"
1430 };
1431
1432 static void setup_swapchain_data_pipeline(struct swapchain_data *data)
1433 {
1434 struct device_data *device_data = data->device;
1435 VkShaderModule vert_module, frag_module;
1436
1437 /* Create shader modules */
1438 VkShaderModuleCreateInfo vert_info = {};
1439 vert_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
1440 vert_info.codeSize = sizeof(overlay_vert_spv);
1441 vert_info.pCode = overlay_vert_spv;
1442 VK_CHECK(device_data->vtable.CreateShaderModule(device_data->device,
1443 &vert_info, NULL, &vert_module));
1444 VkShaderModuleCreateInfo frag_info = {};
1445 frag_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
1446 frag_info.codeSize = sizeof(overlay_frag_spv);
1447 frag_info.pCode = (uint32_t*)overlay_frag_spv;
1448 VK_CHECK(device_data->vtable.CreateShaderModule(device_data->device,
1449 &frag_info, NULL, &frag_module));
1450
1451 /* Font sampler */
1452 VkSamplerCreateInfo sampler_info = {};
1453 sampler_info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
1454 sampler_info.magFilter = VK_FILTER_LINEAR;
1455 sampler_info.minFilter = VK_FILTER_LINEAR;
1456 sampler_info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
1457 sampler_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
1458 sampler_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
1459 sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
1460 sampler_info.minLod = -1000;
1461 sampler_info.maxLod = 1000;
1462 sampler_info.maxAnisotropy = 1.0f;
1463 VK_CHECK(device_data->vtable.CreateSampler(device_data->device, &sampler_info,
1464 NULL, &data->font_sampler));
1465
1466 /* Descriptor pool */
1467 VkDescriptorPoolSize sampler_pool_size = {};
1468 sampler_pool_size.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
1469 sampler_pool_size.descriptorCount = 1;
1470 VkDescriptorPoolCreateInfo desc_pool_info = {};
1471 desc_pool_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
1472 desc_pool_info.maxSets = 1;
1473 desc_pool_info.poolSizeCount = 1;
1474 desc_pool_info.pPoolSizes = &sampler_pool_size;
1475 VK_CHECK(device_data->vtable.CreateDescriptorPool(device_data->device,
1476 &desc_pool_info,
1477 NULL, &data->descriptor_pool));
1478
1479 /* Descriptor layout */
1480 VkSampler sampler[1] = { data->font_sampler };
1481 VkDescriptorSetLayoutBinding binding[1] = {};
1482 binding[0].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
1483 binding[0].descriptorCount = 1;
1484 binding[0].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
1485 binding[0].pImmutableSamplers = sampler;
1486 VkDescriptorSetLayoutCreateInfo set_layout_info = {};
1487 set_layout_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
1488 set_layout_info.bindingCount = 1;
1489 set_layout_info.pBindings = binding;
1490 VK_CHECK(device_data->vtable.CreateDescriptorSetLayout(device_data->device,
1491 &set_layout_info,
1492 NULL, &data->descriptor_layout));
1493
1494 /* Descriptor set */
1495 VkDescriptorSetAllocateInfo alloc_info = {};
1496 alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
1497 alloc_info.descriptorPool = data->descriptor_pool;
1498 alloc_info.descriptorSetCount = 1;
1499 alloc_info.pSetLayouts = &data->descriptor_layout;
1500 VK_CHECK(device_data->vtable.AllocateDescriptorSets(device_data->device,
1501 &alloc_info,
1502 &data->descriptor_set));
1503
1504 /* Constants: we are using 'vec2 offset' and 'vec2 scale' instead of a full
1505 * 3d projection matrix
1506 */
1507 VkPushConstantRange push_constants[1] = {};
1508 push_constants[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
1509 push_constants[0].offset = sizeof(float) * 0;
1510 push_constants[0].size = sizeof(float) * 4;
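   /* Illustrative sketch only (not a quote of this layer's draw recording,
    * which happens where the overlay is rendered elsewhere in this file):
    * with the usual ImGui convention these four floats would be pushed per
    * draw as a scale/translate pair mapping pixel coordinates to clip space,
    * e.g.:
    *
    *    float scale[2]     = { 2.0f / io.DisplaySize.x, 2.0f / io.DisplaySize.y };
    *    float translate[2] = { -1.0f, -1.0f };
    *    vtable.CmdPushConstants(cmd, pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT,
    *                            0, sizeof(scale), scale);
    *    vtable.CmdPushConstants(cmd, pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT,
    *                            sizeof(scale), sizeof(translate), translate);
    */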
   VkPipelineLayoutCreateInfo layout_info = {};
   layout_info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
   layout_info.setLayoutCount = 1;
   layout_info.pSetLayouts = &data->descriptor_layout;
   layout_info.pushConstantRangeCount = 1;
   layout_info.pPushConstantRanges = push_constants;
   VK_CHECK(device_data->vtable.CreatePipelineLayout(device_data->device,
                                                     &layout_info,
                                                     NULL, &data->pipeline_layout));

   VkPipelineShaderStageCreateInfo stage[2] = {};
   stage[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
   stage[0].stage = VK_SHADER_STAGE_VERTEX_BIT;
   stage[0].module = vert_module;
   stage[0].pName = "main";
   stage[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
   stage[1].stage = VK_SHADER_STAGE_FRAGMENT_BIT;
   stage[1].module = frag_module;
   stage[1].pName = "main";

   VkVertexInputBindingDescription binding_desc[1] = {};
   binding_desc[0].stride = sizeof(ImDrawVert);
   binding_desc[0].inputRate = VK_VERTEX_INPUT_RATE_VERTEX;

   VkVertexInputAttributeDescription attribute_desc[3] = {};
   attribute_desc[0].location = 0;
   attribute_desc[0].binding = binding_desc[0].binding;
   attribute_desc[0].format = VK_FORMAT_R32G32_SFLOAT;
   attribute_desc[0].offset = IM_OFFSETOF(ImDrawVert, pos);
   attribute_desc[1].location = 1;
   attribute_desc[1].binding = binding_desc[0].binding;
   attribute_desc[1].format = VK_FORMAT_R32G32_SFLOAT;
   attribute_desc[1].offset = IM_OFFSETOF(ImDrawVert, uv);
   attribute_desc[2].location = 2;
   attribute_desc[2].binding = binding_desc[0].binding;
   attribute_desc[2].format = VK_FORMAT_R8G8B8A8_UNORM;
   attribute_desc[2].offset = IM_OFFSETOF(ImDrawVert, col);

   VkPipelineVertexInputStateCreateInfo vertex_info = {};
   vertex_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
   vertex_info.vertexBindingDescriptionCount = 1;
   vertex_info.pVertexBindingDescriptions = binding_desc;
   vertex_info.vertexAttributeDescriptionCount = 3;
   vertex_info.pVertexAttributeDescriptions = attribute_desc;

   VkPipelineInputAssemblyStateCreateInfo ia_info = {};
   ia_info.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
   ia_info.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;

   VkPipelineViewportStateCreateInfo viewport_info = {};
   viewport_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
   viewport_info.viewportCount = 1;
   viewport_info.scissorCount = 1;

   VkPipelineRasterizationStateCreateInfo raster_info = {};
   raster_info.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
   raster_info.polygonMode = VK_POLYGON_MODE_FILL;
   raster_info.cullMode = VK_CULL_MODE_NONE;
   raster_info.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
   raster_info.lineWidth = 1.0f;

   VkPipelineMultisampleStateCreateInfo ms_info = {};
   ms_info.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
   ms_info.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;

   VkPipelineColorBlendAttachmentState color_attachment[1] = {};
   color_attachment[0].blendEnable = VK_TRUE;
   color_attachment[0].srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA;
   color_attachment[0].dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
   color_attachment[0].colorBlendOp = VK_BLEND_OP_ADD;
   color_attachment[0].srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
   color_attachment[0].dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
   color_attachment[0].alphaBlendOp = VK_BLEND_OP_ADD;
   color_attachment[0].colorWriteMask = VK_COLOR_COMPONENT_R_BIT |
      VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;

   VkPipelineDepthStencilStateCreateInfo depth_info = {};
   depth_info.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;

   VkPipelineColorBlendStateCreateInfo blend_info = {};
   blend_info.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
   blend_info.attachmentCount = 1;
   blend_info.pAttachments = color_attachment;

   VkDynamicState dynamic_states[2] = { VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR };
   VkPipelineDynamicStateCreateInfo dynamic_state = {};
   dynamic_state.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
   dynamic_state.dynamicStateCount = (uint32_t)IM_ARRAYSIZE(dynamic_states);
   dynamic_state.pDynamicStates = dynamic_states;

   VkGraphicsPipelineCreateInfo info = {};
   info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
   info.flags = 0;
   info.stageCount = 2;
   info.pStages = stage;
   info.pVertexInputState = &vertex_info;
   info.pInputAssemblyState = &ia_info;
   info.pViewportState = &viewport_info;
   info.pRasterizationState = &raster_info;
   info.pMultisampleState = &ms_info;
   info.pDepthStencilState = &depth_info;
   info.pColorBlendState = &blend_info;
   info.pDynamicState = &dynamic_state;
   info.layout = data->pipeline_layout;
   info.renderPass = data->render_pass;
   VK_CHECK(
      device_data->vtable.CreateGraphicsPipelines(device_data->device, VK_NULL_HANDLE,
                                                  1, &info,
                                                  NULL, &data->pipeline));

   device_data->vtable.DestroyShaderModule(device_data->device, vert_module, NULL);
   device_data->vtable.DestroyShaderModule(device_data->device, frag_module, NULL);

   ImGuiIO& io = ImGui::GetIO();
   unsigned char* pixels;
   int width, height;
   io.Fonts->GetTexDataAsRGBA32(&pixels, &width, &height);

   /* Font image */
   VkImageCreateInfo image_info = {};
   image_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
   image_info.imageType = VK_IMAGE_TYPE_2D;
   image_info.format = VK_FORMAT_R8G8B8A8_UNORM;
   image_info.extent.width = width;
   image_info.extent.height = height;
   image_info.extent.depth = 1;
   image_info.mipLevels = 1;
   image_info.arrayLayers = 1;
   image_info.samples = VK_SAMPLE_COUNT_1_BIT;
   image_info.tiling = VK_IMAGE_TILING_OPTIMAL;
   image_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
   image_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
   image_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
   VK_CHECK(device_data->vtable.CreateImage(device_data->device, &image_info,
                                            NULL, &data->font_image));
   VkMemoryRequirements font_image_req;
   device_data->vtable.GetImageMemoryRequirements(device_data->device,
                                                  data->font_image, &font_image_req);
   VkMemoryAllocateInfo image_alloc_info = {};
   image_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
   image_alloc_info.allocationSize = font_image_req.size;
   image_alloc_info.memoryTypeIndex = vk_memory_type(device_data,
                                                     VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
                                                     font_image_req.memoryTypeBits);
   VK_CHECK(device_data->vtable.AllocateMemory(device_data->device, &image_alloc_info,
                                               NULL, &data->font_mem));
   VK_CHECK(device_data->vtable.BindImageMemory(device_data->device,
                                                data->font_image,
                                                data->font_mem, 0));

   /* Font image view */
   VkImageViewCreateInfo view_info = {};
   view_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
   view_info.image = data->font_image;
   view_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
   view_info.format = VK_FORMAT_R8G8B8A8_UNORM;
   view_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
   view_info.subresourceRange.levelCount = 1;
   view_info.subresourceRange.layerCount = 1;
   VK_CHECK(device_data->vtable.CreateImageView(device_data->device, &view_info,
                                                NULL, &data->font_image_view));

   /* Descriptor set */
   VkDescriptorImageInfo desc_image[1] = {};
   desc_image[0].sampler = data->font_sampler;
   desc_image[0].imageView = data->font_image_view;
   desc_image[0].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
   VkWriteDescriptorSet write_desc[1] = {};
   write_desc[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
   write_desc[0].dstSet = data->descriptor_set;
   write_desc[0].descriptorCount = 1;
   write_desc[0].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
   write_desc[0].pImageInfo = desc_image;
   device_data->vtable.UpdateDescriptorSets(device_data->device, 1, write_desc, 0, NULL);
}

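/* Per-swapchain initialization: create an ImGui context sized to the
 * swapchain, a render pass that loads the application's rendered image and
 * transitions it to present, image views and framebuffers for every
 * swapchain image, and the command pool used to record the overlay draws.
 */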
static void setup_swapchain_data(struct swapchain_data *data,
                                 const VkSwapchainCreateInfoKHR *pCreateInfo)
{
   data->width = pCreateInfo->imageExtent.width;
   data->height = pCreateInfo->imageExtent.height;
   data->format = pCreateInfo->imageFormat;

   data->imgui_context = ImGui::CreateContext();
   ImGui::SetCurrentContext(data->imgui_context);

   ImGui::GetIO().IniFilename = NULL;
   ImGui::GetIO().DisplaySize = ImVec2((float)data->width, (float)data->height);

   struct device_data *device_data = data->device;

   /* Render pass */
   VkAttachmentDescription attachment_desc = {};
   attachment_desc.format = pCreateInfo->imageFormat;
   attachment_desc.samples = VK_SAMPLE_COUNT_1_BIT;
   attachment_desc.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
   attachment_desc.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
   attachment_desc.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
   attachment_desc.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
   attachment_desc.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
   attachment_desc.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
   VkAttachmentReference color_attachment = {};
   color_attachment.attachment = 0;
   color_attachment.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
   VkSubpassDescription subpass = {};
   subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
   subpass.colorAttachmentCount = 1;
   subpass.pColorAttachments = &color_attachment;
   VkSubpassDependency dependency = {};
   dependency.srcSubpass = VK_SUBPASS_EXTERNAL;
   dependency.dstSubpass = 0;
   dependency.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
   dependency.dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
   dependency.srcAccessMask = 0;
   dependency.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
   VkRenderPassCreateInfo render_pass_info = {};
   render_pass_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
   render_pass_info.attachmentCount = 1;
   render_pass_info.pAttachments = &attachment_desc;
   render_pass_info.subpassCount = 1;
   render_pass_info.pSubpasses = &subpass;
   render_pass_info.dependencyCount = 1;
   render_pass_info.pDependencies = &dependency;
   VK_CHECK(device_data->vtable.CreateRenderPass(device_data->device,
                                                 &render_pass_info,
                                                 NULL, &data->render_pass));

   setup_swapchain_data_pipeline(data);

   VK_CHECK(device_data->vtable.GetSwapchainImagesKHR(device_data->device,
                                                      data->swapchain,
                                                      &data->n_images,
                                                      NULL));

   data->images = ralloc_array(data, VkImage, data->n_images);
   data->image_views = ralloc_array(data, VkImageView, data->n_images);
   data->framebuffers = ralloc_array(data, VkFramebuffer, data->n_images);

   VK_CHECK(device_data->vtable.GetSwapchainImagesKHR(device_data->device,
                                                      data->swapchain,
                                                      &data->n_images,
                                                      data->images));

   /* Image views */
   VkImageViewCreateInfo view_info = {};
   view_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
   view_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
   view_info.format = pCreateInfo->imageFormat;
   view_info.components.r = VK_COMPONENT_SWIZZLE_R;
   view_info.components.g = VK_COMPONENT_SWIZZLE_G;
   view_info.components.b = VK_COMPONENT_SWIZZLE_B;
   view_info.components.a = VK_COMPONENT_SWIZZLE_A;
   view_info.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
   for (uint32_t i = 0; i < data->n_images; i++) {
      view_info.image = data->images[i];
      VK_CHECK(device_data->vtable.CreateImageView(device_data->device,
                                                   &view_info, NULL,
                                                   &data->image_views[i]));
   }

   /* Framebuffers */
   VkImageView attachment[1];
   VkFramebufferCreateInfo fb_info = {};
   fb_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
   fb_info.renderPass = data->render_pass;
   fb_info.attachmentCount = 1;
   fb_info.pAttachments = attachment;
   fb_info.width = data->width;
   fb_info.height = data->height;
   fb_info.layers = 1;
   for (uint32_t i = 0; i < data->n_images; i++) {
      attachment[0] = data->image_views[i];
      VK_CHECK(device_data->vtable.CreateFramebuffer(device_data->device, &fb_info,
                                                     NULL, &data->framebuffers[i]));
   }

   /* Command buffer pool */
   VkCommandPoolCreateInfo cmd_buffer_pool_info = {};
   cmd_buffer_pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
   cmd_buffer_pool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
   cmd_buffer_pool_info.queueFamilyIndex = device_data->graphic_queue->family_index;
   VK_CHECK(device_data->vtable.CreateCommandPool(device_data->device,
                                                  &cmd_buffer_pool_info,
                                                  NULL, &data->command_pool));
}

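/* Tear down everything created by setup_swapchain_data() /
 * setup_swapchain_data_pipeline(), including the per-draw resources kept in
 * the swapchain's draws list.
 */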
static void shutdown_swapchain_data(struct swapchain_data *data)
{
   struct device_data *device_data = data->device;

   list_for_each_entry_safe(struct overlay_draw, draw, &data->draws, link) {
      device_data->vtable.DestroySemaphore(device_data->device, draw->cross_engine_semaphore, NULL);
      device_data->vtable.DestroySemaphore(device_data->device, draw->semaphore, NULL);
      device_data->vtable.DestroyFence(device_data->device, draw->fence, NULL);
      device_data->vtable.DestroyBuffer(device_data->device, draw->vertex_buffer, NULL);
      device_data->vtable.DestroyBuffer(device_data->device, draw->index_buffer, NULL);
      device_data->vtable.FreeMemory(device_data->device, draw->vertex_buffer_mem, NULL);
      device_data->vtable.FreeMemory(device_data->device, draw->index_buffer_mem, NULL);
   }

   for (uint32_t i = 0; i < data->n_images; i++) {
      device_data->vtable.DestroyImageView(device_data->device, data->image_views[i], NULL);
      device_data->vtable.DestroyFramebuffer(device_data->device, data->framebuffers[i], NULL);
   }

   device_data->vtable.DestroyRenderPass(device_data->device, data->render_pass, NULL);

   device_data->vtable.DestroyCommandPool(device_data->device, data->command_pool, NULL);

   device_data->vtable.DestroyPipeline(device_data->device, data->pipeline, NULL);
   device_data->vtable.DestroyPipelineLayout(device_data->device, data->pipeline_layout, NULL);

   device_data->vtable.DestroyDescriptorPool(device_data->device,
                                             data->descriptor_pool, NULL);
   device_data->vtable.DestroyDescriptorSetLayout(device_data->device,
                                                  data->descriptor_layout, NULL);

   device_data->vtable.DestroySampler(device_data->device, data->font_sampler, NULL);
   device_data->vtable.DestroyImageView(device_data->device, data->font_image_view, NULL);
   device_data->vtable.DestroyImage(device_data->device, data->font_image, NULL);
   device_data->vtable.FreeMemory(device_data->device, data->font_mem, NULL);

   device_data->vtable.DestroyBuffer(device_data->device, data->upload_font_buffer, NULL);
   device_data->vtable.FreeMemory(device_data->device, data->upload_font_buffer_mem, NULL);

   ImGui::DestroyContext(data->imgui_context);
}

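/* Called right before forwarding a present: snapshot the frame statistics
 * and, unless display is disabled, record and submit the overlay draw for
 * the image about to be presented. Returns the draw so the caller can wait
 * on its semaphore.
 */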
static struct overlay_draw *before_present(struct swapchain_data *swapchain_data,
                                           struct queue_data *present_queue,
                                           const VkSemaphore *wait_semaphores,
                                           unsigned n_wait_semaphores,
                                           unsigned imageIndex)
{
   struct instance_data *instance_data = swapchain_data->device->instance;
   struct overlay_draw *draw = NULL;

   snapshot_swapchain_frame(swapchain_data);

   if (!instance_data->params.no_display && swapchain_data->n_frames > 0) {
      compute_swapchain_display(swapchain_data);
      draw = render_swapchain_display(swapchain_data, present_queue,
                                      wait_semaphores, n_wait_semaphores,
                                      imageIndex);
   }

   return draw;
}

static VkResult overlay_CreateSwapchainKHR(
   VkDevice device,
   const VkSwapchainCreateInfoKHR* pCreateInfo,
   const VkAllocationCallbacks* pAllocator,
   VkSwapchainKHR* pSwapchain)
{
   struct device_data *device_data = FIND(struct device_data, device);
   VkResult result = device_data->vtable.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
   if (result != VK_SUCCESS) return result;

   struct swapchain_data *swapchain_data = new_swapchain_data(*pSwapchain, device_data);
   setup_swapchain_data(swapchain_data, pCreateInfo);
   return result;
}

static void overlay_DestroySwapchainKHR(
   VkDevice device,
   VkSwapchainKHR swapchain,
   const VkAllocationCallbacks* pAllocator)
{
   if (swapchain == VK_NULL_HANDLE) {
      struct device_data *device_data = FIND(struct device_data, device);
      device_data->vtable.DestroySwapchainKHR(device, swapchain, pAllocator);
      return;
   }

   struct swapchain_data *swapchain_data =
      FIND(struct swapchain_data, swapchain);

   shutdown_swapchain_data(swapchain_data);
   swapchain_data->device->vtable.DestroySwapchainKHR(device, swapchain, pAllocator);
   destroy_swapchain_data(swapchain_data);
}

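/* Present hook: collect the pipeline-statistics and timestamp query results
 * of every command buffer submitted on this queue since the last present,
 * then present each swapchain individually so the overlay draw can be
 * recorded (and waited on) per swapchain.
 */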
static VkResult overlay_QueuePresentKHR(
   VkQueue queue,
   const VkPresentInfoKHR* pPresentInfo)
{
   struct queue_data *queue_data = FIND(struct queue_data, queue);
   struct device_data *device_data = queue_data->device;
   struct instance_data *instance_data = device_data->instance;
   uint32_t query_results[OVERLAY_QUERY_COUNT];

   device_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_frame]++;

   if (list_length(&queue_data->running_command_buffer) > 0) {
      /* Before getting the query results, make sure the operations have
       * completed.
       */
      VK_CHECK(device_data->vtable.ResetFences(device_data->device,
                                               1, &queue_data->queries_fence));
      VK_CHECK(device_data->vtable.QueueSubmit(queue, 0, NULL, queue_data->queries_fence));
      VK_CHECK(device_data->vtable.WaitForFences(device_data->device,
                                                 1, &queue_data->queries_fence,
                                                 VK_FALSE, UINT64_MAX));

      /* Now get the results. */
      list_for_each_entry_safe(struct command_buffer_data, cmd_buffer_data,
                               &queue_data->running_command_buffer, link) {
         list_delinit(&cmd_buffer_data->link);

         if (cmd_buffer_data->pipeline_query_pool) {
            memset(query_results, 0, sizeof(query_results));
            VK_CHECK(device_data->vtable.GetQueryPoolResults(device_data->device,
                                                             cmd_buffer_data->pipeline_query_pool,
                                                             cmd_buffer_data->query_index, 1,
                                                             sizeof(uint32_t) * OVERLAY_QUERY_COUNT,
                                                             query_results, 0, VK_QUERY_RESULT_WAIT_BIT));

            for (uint32_t i = OVERLAY_PARAM_ENABLED_vertices;
                 i <= OVERLAY_PARAM_ENABLED_compute_invocations; i++) {
               device_data->frame_stats.stats[i] += query_results[i - OVERLAY_PARAM_ENABLED_vertices];
            }
         }
         if (cmd_buffer_data->timestamp_query_pool) {
            uint64_t gpu_timestamps[2] = { 0 };
            VK_CHECK(device_data->vtable.GetQueryPoolResults(device_data->device,
                                                             cmd_buffer_data->timestamp_query_pool,
                                                             cmd_buffer_data->query_index * 2, 2,
                                                             2 * sizeof(uint64_t), gpu_timestamps, sizeof(uint64_t),
                                                             VK_QUERY_RESULT_WAIT_BIT | VK_QUERY_RESULT_64_BIT));

            gpu_timestamps[0] &= queue_data->timestamp_mask;
            gpu_timestamps[1] &= queue_data->timestamp_mask;
            device_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_gpu_timing] +=
               (gpu_timestamps[1] - gpu_timestamps[0]) *
               device_data->properties.limits.timestampPeriod;
         }
      }
   }

   /* Unless display of the overlay is disabled, we need to add our overlay
    * drawing semaphore to the list of semaphores to wait on. If we don't do
    * that, the presented picture might have incomplete overlay drawings.
    */
   VkResult result = VK_SUCCESS;
   if (instance_data->params.no_display) {
      for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) {
         VkSwapchainKHR swapchain = pPresentInfo->pSwapchains[i];
         struct swapchain_data *swapchain_data =
            FIND(struct swapchain_data, swapchain);

         uint32_t image_index = pPresentInfo->pImageIndices[i];

         before_present(swapchain_data,
                        queue_data,
                        pPresentInfo->pWaitSemaphores,
                        pPresentInfo->waitSemaphoreCount,
                        image_index);

         VkPresentInfoKHR present_info = *pPresentInfo;
         present_info.swapchainCount = 1;
         present_info.pSwapchains = &swapchain;
         present_info.pImageIndices = &image_index;

         uint64_t ts0 = os_time_get();
         result = queue_data->device->vtable.QueuePresentKHR(queue, &present_info);
         uint64_t ts1 = os_time_get();
         swapchain_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_present_timing] += ts1 - ts0;
      }
   } else {
      for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) {
         VkSwapchainKHR swapchain = pPresentInfo->pSwapchains[i];
         struct swapchain_data *swapchain_data =
            FIND(struct swapchain_data, swapchain);

         uint32_t image_index = pPresentInfo->pImageIndices[i];

         VkPresentInfoKHR present_info = *pPresentInfo;
         present_info.swapchainCount = 1;
         present_info.pSwapchains = &swapchain;
         present_info.pImageIndices = &image_index;

         struct overlay_draw *draw = before_present(swapchain_data,
                                                    queue_data,
                                                    pPresentInfo->pWaitSemaphores,
                                                    pPresentInfo->waitSemaphoreCount,
                                                    image_index);

         /* Because the submission of the overlay draw waits on the semaphores
          * handed for present, we don't need to have this present operation
          * wait on them as well, we can just wait on the overlay submission
          * semaphore.
          */
         present_info.pWaitSemaphores = &draw->semaphore;
         present_info.waitSemaphoreCount = 1;

         uint64_t ts0 = os_time_get();
         VkResult chain_result = queue_data->device->vtable.QueuePresentKHR(queue, &present_info);
         uint64_t ts1 = os_time_get();
         swapchain_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_present_timing] += ts1 - ts0;
         if (pPresentInfo->pResults)
            pPresentInfo->pResults[i] = chain_result;
         if (chain_result != VK_SUCCESS && result == VK_SUCCESS)
            result = chain_result;
      }
   }
   return result;
}

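/* The two acquire hooks below only measure how long the driver spends in
 * vkAcquireNextImage(2)KHR and count the number of acquires.
 */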
static VkResult overlay_AcquireNextImageKHR(
   VkDevice device,
   VkSwapchainKHR swapchain,
   uint64_t timeout,
   VkSemaphore semaphore,
   VkFence fence,
   uint32_t* pImageIndex)
{
   struct swapchain_data *swapchain_data =
      FIND(struct swapchain_data, swapchain);
   struct device_data *device_data = swapchain_data->device;

   uint64_t ts0 = os_time_get();
   VkResult result = device_data->vtable.AcquireNextImageKHR(device, swapchain, timeout,
                                                             semaphore, fence, pImageIndex);
   uint64_t ts1 = os_time_get();

   swapchain_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_acquire_timing] += ts1 - ts0;
   swapchain_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_acquire]++;

   return result;
}

static VkResult overlay_AcquireNextImage2KHR(
   VkDevice device,
   const VkAcquireNextImageInfoKHR* pAcquireInfo,
   uint32_t* pImageIndex)
{
   struct swapchain_data *swapchain_data =
      FIND(struct swapchain_data, pAcquireInfo->swapchain);
   struct device_data *device_data = swapchain_data->device;

   uint64_t ts0 = os_time_get();
   VkResult result = device_data->vtable.AcquireNextImage2KHR(device, pAcquireInfo, pImageIndex);
   uint64_t ts1 = os_time_get();

   swapchain_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_acquire_timing] += ts1 - ts0;
   swapchain_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_acquire]++;

   return result;
}

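/* The vkCmd* hooks below just bump the matching per-command-buffer counter
 * and forward the call unchanged to the next layer/driver.
 */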
static void overlay_CmdDraw(
   VkCommandBuffer commandBuffer,
   uint32_t vertexCount,
   uint32_t instanceCount,
   uint32_t firstVertex,
   uint32_t firstInstance)
{
   struct command_buffer_data *cmd_buffer_data =
      FIND(struct command_buffer_data, commandBuffer);
   cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_draw]++;
   struct device_data *device_data = cmd_buffer_data->device;
   device_data->vtable.CmdDraw(commandBuffer, vertexCount, instanceCount,
                               firstVertex, firstInstance);
}

static void overlay_CmdDrawIndexed(
   VkCommandBuffer commandBuffer,
   uint32_t indexCount,
   uint32_t instanceCount,
   uint32_t firstIndex,
   int32_t vertexOffset,
   uint32_t firstInstance)
{
   struct command_buffer_data *cmd_buffer_data =
      FIND(struct command_buffer_data, commandBuffer);
   cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_draw_indexed]++;
   struct device_data *device_data = cmd_buffer_data->device;
   device_data->vtable.CmdDrawIndexed(commandBuffer, indexCount, instanceCount,
                                      firstIndex, vertexOffset, firstInstance);
}

static void overlay_CmdDrawIndirect(
   VkCommandBuffer commandBuffer,
   VkBuffer buffer,
   VkDeviceSize offset,
   uint32_t drawCount,
   uint32_t stride)
{
   struct command_buffer_data *cmd_buffer_data =
      FIND(struct command_buffer_data, commandBuffer);
   cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_draw_indirect]++;
   struct device_data *device_data = cmd_buffer_data->device;
   device_data->vtable.CmdDrawIndirect(commandBuffer, buffer, offset, drawCount, stride);
}

static void overlay_CmdDrawIndexedIndirect(
   VkCommandBuffer commandBuffer,
   VkBuffer buffer,
   VkDeviceSize offset,
   uint32_t drawCount,
   uint32_t stride)
{
   struct command_buffer_data *cmd_buffer_data =
      FIND(struct command_buffer_data, commandBuffer);
   cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_draw_indexed_indirect]++;
   struct device_data *device_data = cmd_buffer_data->device;
   device_data->vtable.CmdDrawIndexedIndirect(commandBuffer, buffer, offset, drawCount, stride);
}

static void overlay_CmdDrawIndirectCount(
   VkCommandBuffer commandBuffer,
   VkBuffer buffer,
   VkDeviceSize offset,
   VkBuffer countBuffer,
   VkDeviceSize countBufferOffset,
   uint32_t maxDrawCount,
   uint32_t stride)
{
   struct command_buffer_data *cmd_buffer_data =
      FIND(struct command_buffer_data, commandBuffer);
   cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_draw_indirect_count]++;
   struct device_data *device_data = cmd_buffer_data->device;
   device_data->vtable.CmdDrawIndirectCount(commandBuffer, buffer, offset,
                                            countBuffer, countBufferOffset,
                                            maxDrawCount, stride);
}

static void overlay_CmdDrawIndexedIndirectCount(
   VkCommandBuffer commandBuffer,
   VkBuffer buffer,
   VkDeviceSize offset,
   VkBuffer countBuffer,
   VkDeviceSize countBufferOffset,
   uint32_t maxDrawCount,
   uint32_t stride)
{
   struct command_buffer_data *cmd_buffer_data =
      FIND(struct command_buffer_data, commandBuffer);
   cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_draw_indexed_indirect_count]++;
   struct device_data *device_data = cmd_buffer_data->device;
   device_data->vtable.CmdDrawIndexedIndirectCount(commandBuffer, buffer, offset,
                                                   countBuffer, countBufferOffset,
                                                   maxDrawCount, stride);
}

static void overlay_CmdDispatch(
   VkCommandBuffer commandBuffer,
   uint32_t groupCountX,
   uint32_t groupCountY,
   uint32_t groupCountZ)
{
   struct command_buffer_data *cmd_buffer_data =
      FIND(struct command_buffer_data, commandBuffer);
   cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_dispatch]++;
   struct device_data *device_data = cmd_buffer_data->device;
   device_data->vtable.CmdDispatch(commandBuffer, groupCountX, groupCountY, groupCountZ);
}

static void overlay_CmdDispatchIndirect(
   VkCommandBuffer commandBuffer,
   VkBuffer buffer,
   VkDeviceSize offset)
{
   struct command_buffer_data *cmd_buffer_data =
      FIND(struct command_buffer_data, commandBuffer);
   cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_dispatch_indirect]++;
   struct device_data *device_data = cmd_buffer_data->device;
   device_data->vtable.CmdDispatchIndirect(commandBuffer, buffer, offset);
}

static void overlay_CmdBindPipeline(
   VkCommandBuffer commandBuffer,
   VkPipelineBindPoint pipelineBindPoint,
   VkPipeline pipeline)
{
   struct command_buffer_data *cmd_buffer_data =
      FIND(struct command_buffer_data, commandBuffer);
   switch (pipelineBindPoint) {
   case VK_PIPELINE_BIND_POINT_GRAPHICS: cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_pipeline_graphics]++; break;
   case VK_PIPELINE_BIND_POINT_COMPUTE: cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_pipeline_compute]++; break;
   case VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR: cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_pipeline_raytracing]++; break;
   default: break;
   }
   struct device_data *device_data = cmd_buffer_data->device;
   device_data->vtable.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
}

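/* For primary command buffers, reset this command buffer's queries and start
 * the pipeline-statistics query / write the first timestamp. For secondary
 * command buffers, only patch (a copy of) the pNext chain so that the
 * inherited pipelineStatistics match the queries recorded in the primary.
 */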
static VkResult overlay_BeginCommandBuffer(
   VkCommandBuffer commandBuffer,
   const VkCommandBufferBeginInfo* pBeginInfo)
{
   struct command_buffer_data *cmd_buffer_data =
      FIND(struct command_buffer_data, commandBuffer);
   struct device_data *device_data = cmd_buffer_data->device;

   memset(&cmd_buffer_data->stats, 0, sizeof(cmd_buffer_data->stats));

   /* We don't record any query in secondary command buffers, just make sure
    * we have the right inheritance.
    */
   if (cmd_buffer_data->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
      VkCommandBufferBeginInfo begin_info = *pBeginInfo;

      struct VkBaseOutStructure *new_pnext =
         clone_chain((const struct VkBaseInStructure *)pBeginInfo->pNext);
      VkCommandBufferInheritanceInfo inhe_info;

      /* If there was no pNext chain given or we managed to copy it, we can
       * add our stuff in there.
       *
       * Otherwise, keep the old pointer. We failed to copy the pNext chain,
       * meaning there is an unknown extension somewhere in there.
       */
      if (new_pnext || pBeginInfo->pNext == NULL) {
         begin_info.pNext = new_pnext;

         VkCommandBufferInheritanceInfo *parent_inhe_info = (VkCommandBufferInheritanceInfo *)
            vk_find_struct(new_pnext, COMMAND_BUFFER_INHERITANCE_INFO);
         inhe_info = (VkCommandBufferInheritanceInfo) {
            VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
            NULL,
            VK_NULL_HANDLE,
            0,
            VK_NULL_HANDLE,
            VK_FALSE,
            0,
            overlay_query_flags,
         };

         if (parent_inhe_info)
            parent_inhe_info->pipelineStatistics = overlay_query_flags;
         else
            __vk_append_struct(&begin_info, &inhe_info);
      }

      VkResult result = device_data->vtable.BeginCommandBuffer(
         commandBuffer, &begin_info);

      free_chain(new_pnext);

      return result;
   }

   /* Otherwise record a begin query as first command. */
   VkResult result = device_data->vtable.BeginCommandBuffer(commandBuffer, pBeginInfo);

   if (result == VK_SUCCESS) {
      if (cmd_buffer_data->pipeline_query_pool) {
         device_data->vtable.CmdResetQueryPool(commandBuffer,
                                               cmd_buffer_data->pipeline_query_pool,
                                               cmd_buffer_data->query_index, 1);
      }
      if (cmd_buffer_data->timestamp_query_pool) {
         device_data->vtable.CmdResetQueryPool(commandBuffer,
                                               cmd_buffer_data->timestamp_query_pool,
                                               cmd_buffer_data->query_index * 2, 2);
      }
      if (cmd_buffer_data->pipeline_query_pool) {
         device_data->vtable.CmdBeginQuery(commandBuffer,
                                           cmd_buffer_data->pipeline_query_pool,
                                           cmd_buffer_data->query_index, 0);
      }
      if (cmd_buffer_data->timestamp_query_pool) {
         device_data->vtable.CmdWriteTimestamp(commandBuffer,
                                               VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
                                               cmd_buffer_data->timestamp_query_pool,
                                               cmd_buffer_data->query_index * 2);
      }
   }

   return result;
}

static VkResult overlay_EndCommandBuffer(
   VkCommandBuffer commandBuffer)
{
   struct command_buffer_data *cmd_buffer_data =
      FIND(struct command_buffer_data, commandBuffer);
   struct device_data *device_data = cmd_buffer_data->device;

   if (cmd_buffer_data->timestamp_query_pool) {
      device_data->vtable.CmdWriteTimestamp(commandBuffer,
                                            VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
                                            cmd_buffer_data->timestamp_query_pool,
                                            cmd_buffer_data->query_index * 2 + 1);
   }
   if (cmd_buffer_data->pipeline_query_pool) {
      device_data->vtable.CmdEndQuery(commandBuffer,
                                      cmd_buffer_data->pipeline_query_pool,
                                      cmd_buffer_data->query_index);
   }

   return device_data->vtable.EndCommandBuffer(commandBuffer);
}

static VkResult overlay_ResetCommandBuffer(
   VkCommandBuffer commandBuffer,
   VkCommandBufferResetFlags flags)
{
   struct command_buffer_data *cmd_buffer_data =
      FIND(struct command_buffer_data, commandBuffer);
   struct device_data *device_data = cmd_buffer_data->device;

   memset(&cmd_buffer_data->stats, 0, sizeof(cmd_buffer_data->stats));

   return device_data->vtable.ResetCommandBuffer(commandBuffer, flags);
}

static void overlay_CmdExecuteCommands(
   VkCommandBuffer commandBuffer,
   uint32_t commandBufferCount,
   const VkCommandBuffer* pCommandBuffers)
{
   struct command_buffer_data *cmd_buffer_data =
      FIND(struct command_buffer_data, commandBuffer);
   struct device_data *device_data = cmd_buffer_data->device;

   /* Add the stats of the executed command buffers to the primary one. */
   for (uint32_t c = 0; c < commandBufferCount; c++) {
      struct command_buffer_data *sec_cmd_buffer_data =
         FIND(struct command_buffer_data, pCommandBuffers[c]);

      for (uint32_t s = 0; s < OVERLAY_PARAM_ENABLED_MAX; s++)
         cmd_buffer_data->stats.stats[s] += sec_cmd_buffer_data->stats.stats[s];
   }

   device_data->vtable.CmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers);
}

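/* One pipeline-statistics query pool and one timestamp query pool are shared
 * by all command buffers of an allocation; each command buffer gets its own
 * query index. The pools are reference counted through the object map with
 * the number of command buffers and destroyed once the last one is freed.
 */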
static VkResult overlay_AllocateCommandBuffers(
   VkDevice device,
   const VkCommandBufferAllocateInfo* pAllocateInfo,
   VkCommandBuffer* pCommandBuffers)
{
   struct device_data *device_data = FIND(struct device_data, device);
   VkResult result =
      device_data->vtable.AllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers);
   if (result != VK_SUCCESS)
      return result;

   VkQueryPool pipeline_query_pool = VK_NULL_HANDLE;
   VkQueryPool timestamp_query_pool = VK_NULL_HANDLE;
   if (device_data->pipeline_statistics_enabled &&
       pAllocateInfo->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      VkQueryPoolCreateInfo pool_info = {
         VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
         NULL,
         0,
         VK_QUERY_TYPE_PIPELINE_STATISTICS,
         pAllocateInfo->commandBufferCount,
         overlay_query_flags,
      };
      VK_CHECK(device_data->vtable.CreateQueryPool(device_data->device, &pool_info,
                                                   NULL, &pipeline_query_pool));
   }
   if (device_data->instance->params.enabled[OVERLAY_PARAM_ENABLED_gpu_timing]) {
      VkQueryPoolCreateInfo pool_info = {
         VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
         NULL,
         0,
         VK_QUERY_TYPE_TIMESTAMP,
         pAllocateInfo->commandBufferCount * 2,
         0,
      };
      VK_CHECK(device_data->vtable.CreateQueryPool(device_data->device, &pool_info,
                                                   NULL, &timestamp_query_pool));
   }

   for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; i++) {
      new_command_buffer_data(pCommandBuffers[i], pAllocateInfo->level,
                              pipeline_query_pool, timestamp_query_pool,
                              i, device_data);
   }

   if (pipeline_query_pool)
      map_object(HKEY(pipeline_query_pool), (void *)(uintptr_t) pAllocateInfo->commandBufferCount);
   if (timestamp_query_pool)
      map_object(HKEY(timestamp_query_pool), (void *)(uintptr_t) pAllocateInfo->commandBufferCount);

   return result;
}

static void overlay_FreeCommandBuffers(
   VkDevice device,
   VkCommandPool commandPool,
   uint32_t commandBufferCount,
   const VkCommandBuffer* pCommandBuffers)
{
   struct device_data *device_data = FIND(struct device_data, device);
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      struct command_buffer_data *cmd_buffer_data =
         FIND(struct command_buffer_data, pCommandBuffers[i]);

      /* It is legal to free a NULL command buffer */
      if (!cmd_buffer_data)
         continue;

      uint64_t count = (uintptr_t)find_object_data(HKEY(cmd_buffer_data->pipeline_query_pool));
      if (count == 1) {
         unmap_object(HKEY(cmd_buffer_data->pipeline_query_pool));
         device_data->vtable.DestroyQueryPool(device_data->device,
                                              cmd_buffer_data->pipeline_query_pool, NULL);
      } else if (count != 0) {
         map_object(HKEY(cmd_buffer_data->pipeline_query_pool), (void *)(uintptr_t)(count - 1));
      }
      count = (uintptr_t)find_object_data(HKEY(cmd_buffer_data->timestamp_query_pool));
      if (count == 1) {
         unmap_object(HKEY(cmd_buffer_data->timestamp_query_pool));
         device_data->vtable.DestroyQueryPool(device_data->device,
                                              cmd_buffer_data->timestamp_query_pool, NULL);
      } else if (count != 0) {
         map_object(HKEY(cmd_buffer_data->timestamp_query_pool), (void *)(uintptr_t)(count - 1));
      }
      destroy_command_buffer_data(cmd_buffer_data);
   }

   device_data->vtable.FreeCommandBuffers(device, commandPool,
                                          commandBufferCount, pCommandBuffers);
}

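/* The submit hooks merge the recorded counters of each submitted command
 * buffer into the device's frame stats and remember command buffers carrying
 * queries so their results can be read back at present time.
 */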
static VkResult overlay_QueueSubmit(
   VkQueue queue,
   uint32_t submitCount,
   const VkSubmitInfo* pSubmits,
   VkFence fence)
{
   struct queue_data *queue_data = FIND(struct queue_data, queue);
   struct device_data *device_data = queue_data->device;

   device_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_submit]++;

   for (uint32_t s = 0; s < submitCount; s++) {
      for (uint32_t c = 0; c < pSubmits[s].commandBufferCount; c++) {
         struct command_buffer_data *cmd_buffer_data =
            FIND(struct command_buffer_data, pSubmits[s].pCommandBuffers[c]);

         /* Merge the submitted command buffer stats into the device. */
         for (uint32_t st = 0; st < OVERLAY_PARAM_ENABLED_MAX; st++)
            device_data->frame_stats.stats[st] += cmd_buffer_data->stats.stats[st];

         /* Attach the command buffer to the queue so we remember to read its
          * pipeline statistics & timestamps at QueuePresent().
          */
         if (!cmd_buffer_data->pipeline_query_pool &&
             !cmd_buffer_data->timestamp_query_pool)
            continue;

         if (list_is_empty(&cmd_buffer_data->link)) {
            list_addtail(&cmd_buffer_data->link,
                         &queue_data->running_command_buffer);
         } else {
            fprintf(stderr, "Command buffer submitted multiple times before present.\n"
                    "This could lead to invalid data.\n");
         }
      }
   }

   return device_data->vtable.QueueSubmit(queue, submitCount, pSubmits, fence);
}

static VkResult overlay_QueueSubmit2KHR(
   VkQueue queue,
   uint32_t submitCount,
   const VkSubmitInfo2* pSubmits,
   VkFence fence)
{
   struct queue_data *queue_data = FIND(struct queue_data, queue);
   struct device_data *device_data = queue_data->device;

   device_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_submit]++;

   for (uint32_t s = 0; s < submitCount; s++) {
      for (uint32_t c = 0; c < pSubmits[s].commandBufferInfoCount; c++) {
         struct command_buffer_data *cmd_buffer_data =
            FIND(struct command_buffer_data, pSubmits[s].pCommandBufferInfos[c].commandBuffer);

         /* Merge the submitted command buffer stats into the device. */
         for (uint32_t st = 0; st < OVERLAY_PARAM_ENABLED_MAX; st++)
            device_data->frame_stats.stats[st] += cmd_buffer_data->stats.stats[st];

         /* Attach the command buffer to the queue so we remember to read its
          * pipeline statistics & timestamps at QueuePresent().
          */
         if (!cmd_buffer_data->pipeline_query_pool &&
             !cmd_buffer_data->timestamp_query_pool)
            continue;

         if (list_is_empty(&cmd_buffer_data->link)) {
            list_addtail(&cmd_buffer_data->link,
                         &queue_data->running_command_buffer);
         } else {
            fprintf(stderr, "Command buffer submitted multiple times before present.\n"
                    "This could lead to invalid data.\n");
         }
      }
   }

   return device_data->vtable.QueueSubmit2KHR(queue, submitCount, pSubmits, fence);
}

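/* Device creation hook: when pipeline statistics are requested, the
 * pipelineStatisticsQuery and inheritedQueries features are force-enabled on
 * a copy of the create info before chaining to the next vkCreateDevice.
 */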
static VkResult overlay_CreateDevice(
   VkPhysicalDevice physicalDevice,
   const VkDeviceCreateInfo* pCreateInfo,
   const VkAllocationCallbacks* pAllocator,
   VkDevice* pDevice)
{
   struct instance_data *instance_data =
      FIND(struct instance_data, physicalDevice);
   VkLayerDeviceCreateInfo *chain_info =
      get_device_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

   assert(chain_info->u.pLayerInfo);
   PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
   PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
   PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
   if (fpCreateDevice == NULL) {
      return VK_ERROR_INITIALIZATION_FAILED;
   }

   // Advance the link info for the next element on the chain
   chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

   VkPhysicalDeviceFeatures device_features = {};
   VkPhysicalDeviceFeatures *device_features_ptr = NULL;

   VkDeviceCreateInfo create_info = *pCreateInfo;

   struct VkBaseOutStructure *new_pnext =
      clone_chain((const struct VkBaseInStructure *) pCreateInfo->pNext);
   if (new_pnext != NULL) {
      create_info.pNext = new_pnext;

      VkPhysicalDeviceFeatures2 *device_features2 = (VkPhysicalDeviceFeatures2 *)
         vk_find_struct(new_pnext, PHYSICAL_DEVICE_FEATURES_2);
      if (device_features2) {
         /* Can't use device_info->pEnabledFeatures when VkPhysicalDeviceFeatures2 is present */
         device_features_ptr = &device_features2->features;
      } else {
         if (create_info.pEnabledFeatures)
            device_features = *(create_info.pEnabledFeatures);
         device_features_ptr = &device_features;
         create_info.pEnabledFeatures = &device_features;
      }

      if (instance_data->pipeline_statistics_enabled) {
         device_features_ptr->inheritedQueries = true;
         device_features_ptr->pipelineStatisticsQuery = true;
      }
   }

   VkResult result = fpCreateDevice(physicalDevice, &create_info, pAllocator, pDevice);
   free_chain(new_pnext);
   if (result != VK_SUCCESS) return result;

   struct device_data *device_data = new_device_data(*pDevice, instance_data);
   device_data->physical_device = physicalDevice;
   vk_device_dispatch_table_load(&device_data->vtable,
                                 fpGetDeviceProcAddr, *pDevice);

   instance_data->pd_vtable.GetPhysicalDeviceProperties(device_data->physical_device,
                                                        &device_data->properties);

   VkLayerDeviceCreateInfo *load_data_info =
      get_device_chain_info(pCreateInfo, VK_LOADER_DATA_CALLBACK);
   device_data->set_device_loader_data = load_data_info->u.pfnSetDeviceLoaderData;

   device_map_queues(device_data, pCreateInfo);

   device_data->pipeline_statistics_enabled =
      new_pnext != NULL &&
      instance_data->pipeline_statistics_enabled;

   return result;
}

static void overlay_DestroyDevice(
   VkDevice device,
   const VkAllocationCallbacks* pAllocator)
{
   struct device_data *device_data = FIND(struct device_data, device);
   device_unmap_queues(device_data);
   device_data->vtable.DestroyDevice(device, pAllocator);
   destroy_device_data(device_data);
}

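/* Instance creation hook: chains vkCreateInstance, loads the instance and
 * physical-device dispatch tables, and reads the overlay configuration from
 * the VK_LAYER_MESA_OVERLAY_CONFIG environment variable via
 * parse_overlay_env().
 *
 * Illustrative usage only (layer and option names are assumptions here; the
 * authoritative option list lives in overlay_params.h):
 *
 *    VK_INSTANCE_LAYERS=VK_LAYER_MESA_overlay \
 *    VK_LAYER_MESA_OVERLAY_CONFIG=fps,frame_timing ./my_vulkan_app
 */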
static VkResult overlay_CreateInstance(
   const VkInstanceCreateInfo* pCreateInfo,
   const VkAllocationCallbacks* pAllocator,
   VkInstance* pInstance)
{
   VkLayerInstanceCreateInfo *chain_info =
      get_instance_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

   assert(chain_info->u.pLayerInfo);
   PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr =
      chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
   PFN_vkCreateInstance fpCreateInstance =
      (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
   if (fpCreateInstance == NULL) {
      return VK_ERROR_INITIALIZATION_FAILED;
   }

   // Advance the link info for the next element on the chain
   chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

   VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
   if (result != VK_SUCCESS) return result;

   struct instance_data *instance_data = new_instance_data(*pInstance);
   vk_instance_dispatch_table_load(&instance_data->vtable,
                                   fpGetInstanceProcAddr,
                                   instance_data->instance);
   vk_physical_device_dispatch_table_load(&instance_data->pd_vtable,
                                          fpGetInstanceProcAddr,
                                          instance_data->instance);
   instance_data_map_physical_devices(instance_data, true);

   parse_overlay_env(&instance_data->params, getenv("VK_LAYER_MESA_OVERLAY_CONFIG"));

   /* If there's no control file, and an output_file was specified, start
    * capturing fps data right away.
    */
   instance_data->capture_enabled =
      instance_data->params.output_file && instance_data->params.control < 0;
   instance_data->capture_started = instance_data->capture_enabled;

   for (int i = OVERLAY_PARAM_ENABLED_vertices;
        i <= OVERLAY_PARAM_ENABLED_compute_invocations; i++) {
      if (instance_data->params.enabled[i]) {
         instance_data->pipeline_statistics_enabled = true;
         break;
      }
   }

   return result;
}

static void overlay_DestroyInstance(
   VkInstance instance,
   const VkAllocationCallbacks* pAllocator)
{
   struct instance_data *instance_data = FIND(struct instance_data, instance);
   instance_data_map_physical_devices(instance_data, false);
   instance_data->vtable.DestroyInstance(instance, pAllocator);
   destroy_instance_data(instance_data);
}

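/* Name -> function pointer table used by the vkGet*ProcAddr entry points
 * below to intercept the calls listed here and pass everything else through
 * to the next layer/ICD.
 */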
static const struct {
   const char *name;
   void *ptr;
} name_to_funcptr_map[] = {
   { "vkGetInstanceProcAddr", (void *) vkGetInstanceProcAddr },
   { "vkGetDeviceProcAddr", (void *) vkGetDeviceProcAddr },
#define ADD_HOOK(fn) { "vk" # fn, (void *) overlay_ ## fn }
#define ADD_ALIAS_HOOK(alias, fn) { "vk" # alias, (void *) overlay_ ## fn }
   ADD_HOOK(AllocateCommandBuffers),
   ADD_HOOK(FreeCommandBuffers),
   ADD_HOOK(ResetCommandBuffer),
   ADD_HOOK(BeginCommandBuffer),
   ADD_HOOK(EndCommandBuffer),
   ADD_HOOK(CmdExecuteCommands),

   ADD_HOOK(CmdDraw),
   ADD_HOOK(CmdDrawIndexed),
   ADD_HOOK(CmdDrawIndirect),
   ADD_HOOK(CmdDrawIndexedIndirect),
   ADD_HOOK(CmdDispatch),
   ADD_HOOK(CmdDispatchIndirect),
   ADD_HOOK(CmdDrawIndirectCount),
   ADD_ALIAS_HOOK(CmdDrawIndirectCountKHR, CmdDrawIndirectCount),
   ADD_HOOK(CmdDrawIndexedIndirectCount),
   ADD_ALIAS_HOOK(CmdDrawIndexedIndirectCountKHR, CmdDrawIndexedIndirectCount),

   ADD_HOOK(CmdBindPipeline),

   ADD_HOOK(CreateSwapchainKHR),
   ADD_HOOK(QueuePresentKHR),
   ADD_HOOK(DestroySwapchainKHR),
   ADD_HOOK(AcquireNextImageKHR),
   ADD_HOOK(AcquireNextImage2KHR),

   ADD_HOOK(QueueSubmit),
   ADD_HOOK(QueueSubmit2KHR),

   ADD_HOOK(CreateDevice),
   ADD_HOOK(DestroyDevice),

   ADD_HOOK(CreateInstance),
   ADD_HOOK(DestroyInstance),
#undef ADD_HOOK
#undef ADD_ALIAS_HOOK
};

static void *find_ptr(const char *name)
{
   for (uint32_t i = 0; i < ARRAY_SIZE(name_to_funcptr_map); i++) {
      if (strcmp(name, name_to_funcptr_map[i].name) == 0)
         return name_to_funcptr_map[i].ptr;
   }

   return NULL;
}

PUBLIC VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev,
                                                                    const char *funcName)
{
   void *ptr = find_ptr(funcName);
   if (ptr) return reinterpret_cast<PFN_vkVoidFunction>(ptr);

   if (dev == NULL) return NULL;

   struct device_data *device_data = FIND(struct device_data, dev);
   if (device_data->vtable.GetDeviceProcAddr == NULL) return NULL;
   return device_data->vtable.GetDeviceProcAddr(dev, funcName);
}

PUBLIC VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance,
                                                                      const char *funcName)
{
   void *ptr = find_ptr(funcName);
   if (ptr) return reinterpret_cast<PFN_vkVoidFunction>(ptr);

   if (instance == NULL) return NULL;

   struct instance_data *instance_data = FIND(struct instance_data, instance);
   if (instance_data->vtable.GetInstanceProcAddr == NULL) return NULL;
   return instance_data->vtable.GetInstanceProcAddr(instance, funcName);
}