1 /*
2 * Copyright © 2019 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <string.h>
25 #include <stdlib.h>
26 #include <assert.h>
27
28 #include <vulkan/vulkan_core.h>
29 #include <vulkan/vk_layer.h>
30
31 #include "git_sha1.h"
32
33 #include "imgui.h"
34
35 #include "overlay_params.h"
36
37 #include "util/u_debug.h"
38 #include "util/hash_table.h"
39 #include "util/list.h"
40 #include "util/ralloc.h"
41 #include "util/os_time.h"
42 #include "util/os_socket.h"
43 #include "util/simple_mtx.h"
44 #include "util/u_math.h"
45
46 #include "vk_enum_to_str.h"
47 #include "vk_dispatch_table.h"
48 #include "vk_util.h"
49
50 /* Mapped from VkInstance/VkPhysicalDevice */
51 struct instance_data {
52 struct vk_instance_dispatch_table vtable;
53 struct vk_physical_device_dispatch_table pd_vtable;
54 VkInstance instance;
55
56 struct overlay_params params;
57 bool pipeline_statistics_enabled;
58
59 bool first_line_printed;
60
61 int control_client;
62
63 /* Dumping of frame stats to a file has been enabled. */
64 bool capture_enabled;
65
66 /* Dumping of frame stats to a file has been enabled and started. */
67 bool capture_started;
68
69 int socket;
70 };
71
72 struct frame_stat {
73 uint64_t stats[OVERLAY_PARAM_ENABLED_MAX];
74 };
75
76 /* Mapped from VkDevice */
77 struct queue_data;
78 struct device_data {
79 struct instance_data *instance;
80
81 PFN_vkSetDeviceLoaderData set_device_loader_data;
82
83 struct vk_device_dispatch_table vtable;
84 VkPhysicalDevice physical_device;
85 VkDevice device;
86
87 VkPhysicalDeviceProperties properties;
88
89 struct queue_data *graphic_queue;
90
91 struct queue_data **queues;
92 uint32_t n_queues;
93
94 bool pipeline_statistics_enabled;
95
96 /* For a single frame */
97 struct frame_stat frame_stats;
98 };
99
100 /* Mapped from VkCommandBuffer */
101 struct command_buffer_data {
102 struct device_data *device;
103
104 VkCommandBufferLevel level;
105
106 VkCommandBuffer cmd_buffer;
107 VkQueryPool pipeline_query_pool;
108 VkQueryPool timestamp_query_pool;
109 uint32_t query_index;
110
111 struct frame_stat stats;
112
113 struct list_head link; /* link into queue_data::running_command_buffer */
114 };
115
116 /* Mapped from VkQueue */
117 struct queue_data {
118 struct device_data *device;
119
120 VkQueue queue;
121 VkQueueFlags flags;
122 uint32_t family_index;
123 uint64_t timestamp_mask;
124
125 VkFence queries_fence;
126
127 struct list_head running_command_buffer;
128 };
129
130 struct overlay_draw {
131 struct list_head link;
132
133 VkCommandBuffer command_buffer;
134
135 VkSemaphore cross_engine_semaphore;
136
137 VkSemaphore semaphore;
138 VkFence fence;
139
140 VkBuffer vertex_buffer;
141 VkDeviceMemory vertex_buffer_mem;
142 VkDeviceSize vertex_buffer_size;
143
144 VkBuffer index_buffer;
145 VkDeviceMemory index_buffer_mem;
146 VkDeviceSize index_buffer_size;
147 };
148
149 /* Mapped from VkSwapchainKHR */
150 struct swapchain_data {
151 struct device_data *device;
152
153 VkSwapchainKHR swapchain;
154 unsigned width, height;
155 VkFormat format;
156
157 uint32_t n_images;
158 VkImage *images;
159 VkImageView *image_views;
160 VkFramebuffer *framebuffers;
161
162 VkRenderPass render_pass;
163
164 VkDescriptorPool descriptor_pool;
165 VkDescriptorSetLayout descriptor_layout;
166 VkDescriptorSet descriptor_set;
167
168 VkSampler font_sampler;
169
170 VkPipelineLayout pipeline_layout;
171 VkPipeline pipeline;
172
173 VkCommandPool command_pool;
174
175 struct list_head draws; /* List of struct overlay_draw */
176
177 bool font_uploaded;
178 VkImage font_image;
179 VkImageView font_image_view;
180 VkDeviceMemory font_mem;
181 VkBuffer upload_font_buffer;
182 VkDeviceMemory upload_font_buffer_mem;
183
184 /**/
185 ImGuiContext* imgui_context;
186 ImVec2 window_size;
187
188 /**/
189 uint64_t n_frames;
190 uint64_t last_present_time;
191
192 unsigned n_frames_since_update;
193 uint64_t last_fps_update;
194 double fps;
195
196 enum overlay_param_enabled stat_selector;
197 double time_dividor;
198 struct frame_stat stats_min, stats_max;
199 struct frame_stat frames_stats[200];
200
201 /* Over a single frame */
202 struct frame_stat frame_stats;
203
204 /* Over fps_sampling_period */
205 struct frame_stat accumulated_stats;
206 };
207
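/* Pipeline statistics collected for every command buffer the layer tracks.
 * OVERLAY_QUERY_COUNT below must match the number of bits set here.
 */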
208 static const VkQueryPipelineStatisticFlags overlay_query_flags =
209 VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT |
210 VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT |
211 VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT |
212 VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT |
213 VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT |
214 VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT |
215 VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT |
216 VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT |
217 VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT |
218 VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT |
219 VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT;
220 #define OVERLAY_QUERY_COUNT (11)
221
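/* Single global map from Vulkan handles (cast to uint64_t) to the layer's
 * per-object data (instance, physical device, device, queue, command buffer,
 * swapchain). All accesses go through the helpers below, which take the
 * mutex.
 */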
222 static struct hash_table_u64 *vk_object_to_data = NULL;
223 static simple_mtx_t vk_object_to_data_mutex = SIMPLE_MTX_INITIALIZER;
224
225 thread_local ImGuiContext* __MesaImGui;
226
227 static inline void ensure_vk_object_map(void)
228 {
229 if (!vk_object_to_data)
230 vk_object_to_data = _mesa_hash_table_u64_create(NULL);
231 }
232
233 #define HKEY(obj) ((uint64_t)(obj))
234 #define FIND(type, obj) ((type *)find_object_data(HKEY(obj)))
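/* Typical lookup (illustrative): given a VkDevice handle 'device',
 *
 *    struct device_data *device_data = FIND(struct device_data, device);
 *
 * returns the data previously registered with map_object(HKEY(device), ...).
 */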
235
236 static void *find_object_data(uint64_t obj)
237 {
238 simple_mtx_lock(&vk_object_to_data_mutex);
239 ensure_vk_object_map();
240 void *data = _mesa_hash_table_u64_search(vk_object_to_data, obj);
241 simple_mtx_unlock(&vk_object_to_data_mutex);
242 return data;
243 }
244
245 static void map_object(uint64_t obj, void *data)
246 {
247 simple_mtx_lock(&vk_object_to_data_mutex);
248 ensure_vk_object_map();
249 _mesa_hash_table_u64_insert(vk_object_to_data, obj, data);
250 simple_mtx_unlock(&vk_object_to_data_mutex);
251 }
252
253 static void unmap_object(uint64_t obj)
254 {
255 simple_mtx_lock(&vk_object_to_data_mutex);
256 _mesa_hash_table_u64_remove(vk_object_to_data, obj);
257 simple_mtx_unlock(&vk_object_to_data_mutex);
258 }
259
260 /**/
261
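/* Logs (but does not abort on) any Vulkan call that doesn't return
 * VK_SUCCESS.
 */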
262 #define VK_CHECK(expr) \
263 do { \
264 VkResult __result = (expr); \
265 if (__result != VK_SUCCESS) { \
266 fprintf(stderr, "'%s' line %i failed with %s\n", \
267 #expr, __LINE__, vk_Result_to_str(__result)); \
268 } \
269 } while (0)
270
271 /**/
272
273 static VkLayerInstanceCreateInfo *get_instance_chain_info(const VkInstanceCreateInfo *pCreateInfo,
274 VkLayerFunction func)
275 {
276 vk_foreach_struct_const(item, pCreateInfo->pNext) {
277 if (item->sType == VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO &&
278 ((VkLayerInstanceCreateInfo *) item)->function == func)
279 return (VkLayerInstanceCreateInfo *) item;
280 }
281 unreachable("instance chain info not found");
282 return NULL;
283 }
284
285 static VkLayerDeviceCreateInfo *get_device_chain_info(const VkDeviceCreateInfo *pCreateInfo,
286 VkLayerFunction func)
287 {
288 vk_foreach_struct_const(item, pCreateInfo->pNext) {
289 if (item->sType == VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO &&
290 ((VkLayerDeviceCreateInfo *) item)->function == func)
291 return (VkLayerDeviceCreateInfo *)item;
292 }
293 unreachable("device chain info not found");
294 return NULL;
295 }
296
297 static void
298 free_chain(struct VkBaseOutStructure *chain)
299 {
300 while (chain) {
301 void *node = chain;
302 chain = chain->pNext;
303 free(node);
304 }
305 }
306
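/* Deep-copies a pNext chain. If any structure has an unknown size
 * (vk_structure_type_size() == 0), the partial copy is freed and NULL is
 * returned.
 */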
307 static struct VkBaseOutStructure *
308 clone_chain(const struct VkBaseInStructure *chain)
309 {
310 struct VkBaseOutStructure *head = NULL, *tail = NULL;
311
312 vk_foreach_struct_const(item, chain) {
313 size_t item_size = vk_structure_type_size(item);
314 if (item_size == 0) {
315 free_chain(head);
316 return NULL;
317 }
318
319 struct VkBaseOutStructure *new_item =
320 (struct VkBaseOutStructure *)malloc(item_size);
321
322 memcpy(new_item, item, item_size);
323
324 if (!head)
325 head = new_item;
326 if (tail)
327 tail->pNext = new_item;
328 tail = new_item;
329 }
330
331 return head;
332 }
333
334 /**/
335
336 static struct instance_data *new_instance_data(VkInstance instance)
337 {
338 struct instance_data *data = rzalloc(NULL, struct instance_data);
339 data->instance = instance;
340 data->control_client = -1;
341 data->socket = -1;
342 map_object(HKEY(data->instance), data);
343 return data;
344 }
345
346 static void destroy_instance_data(struct instance_data *data)
347 {
348 if (data->params.output_file)
349 fclose(data->params.output_file);
350 if (data->socket >= 0)
351 os_socket_close(data->socket);
352 unmap_object(HKEY(data->instance));
353 ralloc_free(data);
354 }
355
356 static void instance_data_map_physical_devices(struct instance_data *instance_data,
357 bool map)
358 {
359 uint32_t physicalDeviceCount = 0;
360 instance_data->vtable.EnumeratePhysicalDevices(instance_data->instance,
361 &physicalDeviceCount,
362 NULL);
363
364 VkPhysicalDevice *physicalDevices = (VkPhysicalDevice *) malloc(sizeof(VkPhysicalDevice) * physicalDeviceCount);
365 instance_data->vtable.EnumeratePhysicalDevices(instance_data->instance,
366 &physicalDeviceCount,
367 physicalDevices);
368
369 for (uint32_t i = 0; i < physicalDeviceCount; i++) {
370 if (map)
371 map_object(HKEY(physicalDevices[i]), instance_data);
372 else
373 unmap_object(HKEY(physicalDevices[i]));
374 }
375
376 free(physicalDevices);
377 }
378
379 /**/
380 static struct device_data *new_device_data(VkDevice device, struct instance_data *instance)
381 {
382 struct device_data *data = rzalloc(NULL, struct device_data);
383 data->instance = instance;
384 data->device = device;
385 map_object(HKEY(data->device), data);
386 return data;
387 }
388
389 static struct queue_data *new_queue_data(VkQueue queue,
390 const VkQueueFamilyProperties *family_props,
391 uint32_t family_index,
392 struct device_data *device_data)
393 {
394 struct queue_data *data = rzalloc(device_data, struct queue_data);
395 data->device = device_data;
396 data->queue = queue;
397 data->flags = family_props->queueFlags;
398 data->timestamp_mask = (1ull << family_props->timestampValidBits) - 1;
399 data->family_index = family_index;
400 list_inithead(&data->running_command_buffer);
401 map_object(HKEY(data->queue), data);
402
403 /* Fence synchronizing access to queries on that queue. */
404 VkFenceCreateInfo fence_info = {};
405 fence_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
406 fence_info.flags = VK_FENCE_CREATE_SIGNALED_BIT;
407 VK_CHECK(device_data->vtable.CreateFence(device_data->device,
408 &fence_info,
409 NULL,
410 &data->queries_fence));
411
412 if (data->flags & VK_QUEUE_GRAPHICS_BIT)
413 device_data->graphic_queue = data;
414
415 return data;
416 }
417
418 static void destroy_queue(struct queue_data *data)
419 {
420 struct device_data *device_data = data->device;
421 device_data->vtable.DestroyFence(device_data->device, data->queries_fence, NULL);
422 unmap_object(HKEY(data->queue));
423 ralloc_free(data);
424 }
425
426 static void device_map_queues(struct device_data *data,
427 const VkDeviceCreateInfo *pCreateInfo)
428 {
429 for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++)
430 data->n_queues += pCreateInfo->pQueueCreateInfos[i].queueCount;
431 data->queues = ralloc_array(data, struct queue_data *, data->n_queues);
432
433 struct instance_data *instance_data = data->instance;
434 uint32_t n_family_props;
435 instance_data->pd_vtable.GetPhysicalDeviceQueueFamilyProperties(data->physical_device,
436 &n_family_props,
437 NULL);
438 VkQueueFamilyProperties *family_props =
439 (VkQueueFamilyProperties *)malloc(sizeof(VkQueueFamilyProperties) * n_family_props);
440 instance_data->pd_vtable.GetPhysicalDeviceQueueFamilyProperties(data->physical_device,
441 &n_family_props,
442 family_props);
443
444 uint32_t queue_index = 0;
445 for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
446 for (uint32_t j = 0; j < pCreateInfo->pQueueCreateInfos[i].queueCount; j++) {
447 VkQueue queue;
448 data->vtable.GetDeviceQueue(data->device,
449 pCreateInfo->pQueueCreateInfos[i].queueFamilyIndex,
450 j, &queue);
451
452 VK_CHECK(data->set_device_loader_data(data->device, queue));
453
454 data->queues[queue_index++] =
455 new_queue_data(queue, &family_props[pCreateInfo->pQueueCreateInfos[i].queueFamilyIndex],
456 pCreateInfo->pQueueCreateInfos[i].queueFamilyIndex, data);
457 }
458 }
459
460 free(family_props);
461 }
462
463 static void device_unmap_queues(struct device_data *data)
464 {
465 for (uint32_t i = 0; i < data->n_queues; i++)
466 destroy_queue(data->queues[i]);
467 }
468
469 static void destroy_device_data(struct device_data *data)
470 {
471 unmap_object(HKEY(data->device));
472 ralloc_free(data);
473 }
474
475 /**/
476 static struct command_buffer_data *new_command_buffer_data(VkCommandBuffer cmd_buffer,
477 VkCommandBufferLevel level,
478 VkQueryPool pipeline_query_pool,
479 VkQueryPool timestamp_query_pool,
480 uint32_t query_index,
481 struct device_data *device_data)
482 {
483 struct command_buffer_data *data = rzalloc(NULL, struct command_buffer_data);
484 data->device = device_data;
485 data->cmd_buffer = cmd_buffer;
486 data->level = level;
487 data->pipeline_query_pool = pipeline_query_pool;
488 data->timestamp_query_pool = timestamp_query_pool;
489 data->query_index = query_index;
490 list_inithead(&data->link);
491 map_object(HKEY(data->cmd_buffer), data);
492 return data;
493 }
494
495 static void destroy_command_buffer_data(struct command_buffer_data *data)
496 {
497 unmap_object(HKEY(data->cmd_buffer));
498 list_delinit(&data->link);
499 ralloc_free(data);
500 }
501
502 /**/
503 static struct swapchain_data *new_swapchain_data(VkSwapchainKHR swapchain,
504 struct device_data *device_data)
505 {
506 struct instance_data *instance_data = device_data->instance;
507 struct swapchain_data *data = rzalloc(NULL, struct swapchain_data);
508 data->device = device_data;
509 data->swapchain = swapchain;
510 data->window_size = ImVec2(instance_data->params.width, instance_data->params.height);
511 list_inithead(&data->draws);
512 map_object(HKEY(data->swapchain), data);
513 return data;
514 }
515
516 static void destroy_swapchain_data(struct swapchain_data *data)
517 {
518 unmap_object(HKEY(data->swapchain));
519 ralloc_free(data);
520 }
521
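/* Returns a draw context for the overlay: the oldest draw in the swapchain's
 * list is recycled if its fence has already signaled, otherwise a fresh
 * command buffer, fence and semaphores are allocated.
 */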
522 struct overlay_draw *get_overlay_draw(struct swapchain_data *data)
523 {
524 struct device_data *device_data = data->device;
525 struct overlay_draw *draw = list_is_empty(&data->draws) ?
526 NULL : list_first_entry(&data->draws, struct overlay_draw, link);
527
528 VkSemaphoreCreateInfo sem_info = {};
529 sem_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
530
531 if (draw && device_data->vtable.GetFenceStatus(device_data->device, draw->fence) == VK_SUCCESS) {
532 list_del(&draw->link);
533 VK_CHECK(device_data->vtable.ResetFences(device_data->device,
534 1, &draw->fence));
535 list_addtail(&draw->link, &data->draws);
536 return draw;
537 }
538
539 draw = rzalloc(data, struct overlay_draw);
540
541 VkCommandBufferAllocateInfo cmd_buffer_info = {};
542 cmd_buffer_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
543 cmd_buffer_info.commandPool = data->command_pool;
544 cmd_buffer_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
545 cmd_buffer_info.commandBufferCount = 1;
546 VK_CHECK(device_data->vtable.AllocateCommandBuffers(device_data->device,
547 &cmd_buffer_info,
548 &draw->command_buffer));
549 VK_CHECK(device_data->set_device_loader_data(device_data->device,
550 draw->command_buffer));
551
552
553 VkFenceCreateInfo fence_info = {};
554 fence_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
555 VK_CHECK(device_data->vtable.CreateFence(device_data->device,
556 &fence_info,
557 NULL,
558 &draw->fence));
559
560 VK_CHECK(device_data->vtable.CreateSemaphore(device_data->device, &sem_info,
561 NULL, &draw->semaphore));
562 VK_CHECK(device_data->vtable.CreateSemaphore(device_data->device, &sem_info,
563 NULL, &draw->cross_engine_semaphore));
564
565 list_addtail(&draw->link, &data->draws);
566
567 return draw;
568 }
569
570 static const char *param_unit(enum overlay_param_enabled param)
571 {
572 switch (param) {
573 case OVERLAY_PARAM_ENABLED_frame_timing:
574 case OVERLAY_PARAM_ENABLED_acquire_timing:
575 case OVERLAY_PARAM_ENABLED_present_timing:
576 return "(us)";
577 case OVERLAY_PARAM_ENABLED_gpu_timing:
578 return "(ns)";
579 default:
580 return "";
581 }
582 }
583
584 static void parse_command(struct instance_data *instance_data,
585 const char *cmd, unsigned cmdlen,
586 const char *param, unsigned paramlen)
587 {
588 if (!strncmp(cmd, "capture", cmdlen)) {
589 int value = atoi(param);
590 bool enabled = value > 0;
591
592 if (enabled) {
593 instance_data->capture_enabled = true;
594 } else {
595 instance_data->capture_enabled = false;
596 instance_data->capture_started = false;
597 }
598 }
599 }
600
601 #define BUFSIZE 4096
602
603 /**
604 * This function processes commands received over the control socket.
605 *
606 * A command starts with a colon, followed by the command name, then an
607 * optional '=' and a parameter. It has to end with a semicolon. A full command
608 * + parameter looks like:
609 *
610 * :cmd=param;
611 */
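/* For example, based on the commands parse_command() understands, a client
 * enabling and later disabling frame capture would send:
 *
 *    :capture=1;
 *    :capture=0;
 */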
612 static void process_char(struct instance_data *instance_data, char c)
613 {
614 static char cmd[BUFSIZE];
615 static char param[BUFSIZE];
616
617 static unsigned cmdpos = 0;
618 static unsigned parampos = 0;
619 static bool reading_cmd = false;
620 static bool reading_param = false;
621
622 switch (c) {
623 case ':':
624 cmdpos = 0;
625 parampos = 0;
626 reading_cmd = true;
627 reading_param = false;
628 break;
629 case ';':
630 if (!reading_cmd)
631 break;
632 cmd[cmdpos++] = '\0';
633 param[parampos++] = '\0';
634 parse_command(instance_data, cmd, cmdpos, param, parampos);
635 reading_cmd = false;
636 reading_param = false;
637 break;
638 case '=':
639 if (!reading_cmd)
640 break;
641 reading_param = true;
642 break;
643 default:
644 if (!reading_cmd)
645 break;
646
647 if (reading_param) {
648 /* overflow means an invalid parameter */
649 if (parampos >= BUFSIZE - 1) {
650 reading_cmd = false;
651 reading_param = false;
652 break;
653 }
654
655 param[parampos++] = c;
656 } else {
657 /* overflow means an invalid command */
658 if (cmdpos >= BUFSIZE - 1) {
659 reading_cmd = false;
660 break;
661 }
662
663 cmd[cmdpos++] = c;
664 }
665 }
666 }
667
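/* Sends ":cmd" or ":cmd=param;" to the connected control client. Note that
 * the terminating ';' is only emitted when a parameter is present.
 */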
668 static void control_send(struct instance_data *instance_data,
669 const char *cmd, unsigned cmdlen,
670 const char *param, unsigned paramlen)
671 {
672 unsigned msglen = 0;
673 char buffer[BUFSIZE];
674
675 assert(cmdlen + paramlen + 3 < BUFSIZE);
676
677 buffer[msglen++] = ':';
678
679 memcpy(&buffer[msglen], cmd, cmdlen);
680 msglen += cmdlen;
681
682 if (paramlen > 0) {
683 buffer[msglen++] = '=';
684 memcpy(&buffer[msglen], param, paramlen);
685 msglen += paramlen;
686 buffer[msglen++] = ';';
687 }
688
689 os_socket_send(instance_data->control_client, buffer, msglen, 0);
690 }
691
692 static void control_send_connection_string(struct device_data *device_data)
693 {
694 struct instance_data *instance_data = device_data->instance;
695
696 const char *controlVersionCmd = "MesaOverlayControlVersion";
697 const char *controlVersionString = "1";
698
699 control_send(instance_data, controlVersionCmd, strlen(controlVersionCmd),
700 controlVersionString, strlen(controlVersionString));
701
702 const char *deviceCmd = "DeviceName";
703 const char *deviceName = device_data->properties.deviceName;
704
705 control_send(instance_data, deviceCmd, strlen(deviceCmd),
706 deviceName, strlen(deviceName));
707
708 const char *mesaVersionCmd = "MesaVersion";
709 const char *mesaVersionString = "Mesa " PACKAGE_VERSION MESA_GIT_SHA1;
710
711 control_send(instance_data, mesaVersionCmd, strlen(mesaVersionCmd),
712 mesaVersionString, strlen(mesaVersionString));
713 }
714
715 static void control_client_check(struct device_data *device_data)
716 {
717 struct instance_data *instance_data = device_data->instance;
718
719 /* Already connected, just return. */
720 if (instance_data->control_client >= 0)
721 return;
722
723 int socket = os_socket_accept(instance_data->socket);
724 if (socket == -1) {
725 if (errno != EAGAIN && errno != EWOULDBLOCK && errno != ECONNABORTED)
726 fprintf(stderr, "ERROR on socket: %s\n", strerror(errno));
727 return;
728 }
729
730 if (socket >= 0) {
731 os_socket_block(socket, false);
732 instance_data->control_client = socket;
733 control_send_connection_string(device_data);
734 }
735 }
736
737 static void control_client_disconnected(struct instance_data *instance_data)
738 {
739 os_socket_close(instance_data->control_client);
740 instance_data->control_client = -1;
741 }
742
743 static void process_control_socket(struct instance_data *instance_data)
744 {
745 const int client = instance_data->control_client;
746 if (client >= 0) {
747 char buf[BUFSIZE];
748
749 while (true) {
750 ssize_t n = os_socket_recv(client, buf, BUFSIZE, 0);
751
752 if (n == -1) {
753 if (errno == EAGAIN || errno == EWOULDBLOCK) {
754 /* nothing to read, try again later */
755 break;
756 }
757
758 if (errno != ECONNRESET)
759 fprintf(stderr, "ERROR on connection: %s\n", strerror(errno));
760
761 control_client_disconnected(instance_data);
762 } else if (n == 0) {
763 /* recv() returns 0 when the client disconnects */
764 control_client_disconnected(instance_data);
765 }
766
767 for (ssize_t i = 0; i < n; i++) {
768 process_char(instance_data, buf[i]);
769 }
770
771 /* If we try to read BUFSIZE and receive BUFSIZE bytes from the
772 * socket, there's a good chance that there's still more data to be
773 * read, so we will try again. Otherwise, simply be done for this
774 * iteration and try again on the next frame.
775 */
776 if (n < BUFSIZE)
777 break;
778 }
779 }
780 }
781
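/* Called once per present on a swapchain: services the control socket, folds
 * the per-device and per-swapchain counters into the ring buffer of per-frame
 * stats, refreshes the FPS estimate every fps_sampling_period and, while
 * capture is active, appends a CSV line to the configured output file.
 */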
782 static void snapshot_swapchain_frame(struct swapchain_data *data)
783 {
784 struct device_data *device_data = data->device;
785 struct instance_data *instance_data = device_data->instance;
786 uint32_t f_idx = data->n_frames % ARRAY_SIZE(data->frames_stats);
787 uint64_t now = os_time_get(); /* us */
788
789 if (instance_data->params.control && instance_data->socket < 0) {
790 int ret = os_socket_listen_abstract(instance_data->params.control, 1);
791 if (ret >= 0) {
792 os_socket_block(ret, false);
793 instance_data->socket = ret;
794 } else {
795 fprintf(stderr, "ERROR: Couldn't create socket pipe at '%s'\n", instance_data->params.control);
796 fprintf(stderr, "ERROR: '%s'\n", strerror(errno));
797 }
798 }
799
800 if (instance_data->socket >= 0) {
801 control_client_check(device_data);
802 process_control_socket(instance_data);
803 }
804
805 if (data->last_present_time) {
806 data->frame_stats.stats[OVERLAY_PARAM_ENABLED_frame_timing] =
807 now - data->last_present_time;
808 }
809
810 memset(&data->frames_stats[f_idx], 0, sizeof(data->frames_stats[f_idx]));
811 for (int s = 0; s < OVERLAY_PARAM_ENABLED_MAX; s++) {
812 data->frames_stats[f_idx].stats[s] += device_data->frame_stats.stats[s] + data->frame_stats.stats[s];
813 data->accumulated_stats.stats[s] += device_data->frame_stats.stats[s] + data->frame_stats.stats[s];
814 }
815
816 /* If capture has been enabled but it hasn't started yet, it means we are on
817 * the first snapshot after it has been enabled. At this point we want to
818 * use the stats captured so far to update the display, but we don't want
819 * that data to add noise to the stats we want to capture from now
820 * on.
821 *
822 * capture_begin == true will trigger an update of the fps on display, and a
823 * flush of the data, but no stats will be written to the output file. This
824 * way, we will have only stats from after the capture has been enabled
825 * written to the output_file.
826 */
827 const bool capture_begin =
828 instance_data->capture_enabled && !instance_data->capture_started;
829
830 if (data->last_fps_update) {
831 double elapsed = (double)(now - data->last_fps_update); /* us */
832 if (capture_begin ||
833 elapsed >= instance_data->params.fps_sampling_period) {
834 data->fps = 1000000.0f * data->n_frames_since_update / elapsed;
835 if (instance_data->capture_started) {
836 if (!instance_data->first_line_printed) {
837 bool first_column = true;
838
839 instance_data->first_line_printed = true;
840
841 #define OVERLAY_PARAM_BOOL(name) \
842 if (instance_data->params.enabled[OVERLAY_PARAM_ENABLED_##name]) { \
843 fprintf(instance_data->params.output_file, \
844 "%s%s%s", first_column ? "" : ", ", #name, \
845 param_unit(OVERLAY_PARAM_ENABLED_##name)); \
846 first_column = false; \
847 }
848 #define OVERLAY_PARAM_CUSTOM(name)
849 OVERLAY_PARAMS
850 #undef OVERLAY_PARAM_BOOL
851 #undef OVERLAY_PARAM_CUSTOM
852 fprintf(instance_data->params.output_file, "\n");
853 }
854
855 for (int s = 0; s < OVERLAY_PARAM_ENABLED_MAX; s++) {
856 if (!instance_data->params.enabled[s])
857 continue;
858 if (s == OVERLAY_PARAM_ENABLED_fps) {
859 fprintf(instance_data->params.output_file,
860 "%s%.2f", s == 0 ? "" : ", ", data->fps);
861 } else {
862 fprintf(instance_data->params.output_file,
863 "%s%" PRIu64, s == 0 ? "" : ", ",
864 data->accumulated_stats.stats[s]);
865 }
866 }
867 fprintf(instance_data->params.output_file, "\n");
868 fflush(instance_data->params.output_file);
869 }
870
871 memset(&data->accumulated_stats, 0, sizeof(data->accumulated_stats));
872 data->n_frames_since_update = 0;
873 data->last_fps_update = now;
874
875 if (capture_begin)
876 instance_data->capture_started = true;
877 }
878 } else {
879 data->last_fps_update = now;
880 }
881
882 memset(&device_data->frame_stats, 0, sizeof(device_data->frame_stats));
883 memset(&data->frame_stats, 0, sizeof(data->frame_stats));
884
885 data->last_present_time = now;
886 data->n_frames++;
887 data->n_frames_since_update++;
888 }
889
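/* ImGui::PlotHistogram() callbacks indexing the frames_stats ring buffer.
 * Slots that have not been written yet return 0; time stats are scaled by
 * time_dividor (us or ns to ms).
 */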
890 static float get_time_stat(void *_data, int _idx)
891 {
892 struct swapchain_data *data = (struct swapchain_data *) _data;
893 if ((ARRAY_SIZE(data->frames_stats) - _idx) > data->n_frames)
894 return 0.0f;
895 int idx = ARRAY_SIZE(data->frames_stats) +
896 data->n_frames < ARRAY_SIZE(data->frames_stats) ?
897 _idx - data->n_frames :
898 _idx + data->n_frames;
899 idx %= ARRAY_SIZE(data->frames_stats);
900 /* Time stats are in us. */
901 return data->frames_stats[idx].stats[data->stat_selector] / data->time_dividor;
902 }
903
904 static float get_stat(void *_data, int _idx)
905 {
906 struct swapchain_data *data = (struct swapchain_data *) _data;
907 if ((ARRAY_SIZE(data->frames_stats) - _idx) > data->n_frames)
908 return 0.0f;
909 int idx = ARRAY_SIZE(data->frames_stats) +
910 data->n_frames < ARRAY_SIZE(data->frames_stats) ?
911 _idx - data->n_frames :
912 _idx + data->n_frames;
913 idx %= ARRAY_SIZE(data->frames_stats);
914 return data->frames_stats[idx].stats[data->stat_selector];
915 }
916
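/* Places the overlay window in the corner selected by the position parameter,
 * with a fixed 10 pixel margin from the swapchain edges.
 */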
917 static void position_layer(struct swapchain_data *data)
918
919 {
920 struct device_data *device_data = data->device;
921 struct instance_data *instance_data = device_data->instance;
922 const float margin = 10.0f;
923
924 ImGui::SetNextWindowBgAlpha(0.5);
925 ImGui::SetNextWindowSize(data->window_size, ImGuiCond_Always);
926 switch (instance_data->params.position) {
927 case LAYER_POSITION_TOP_LEFT:
928 ImGui::SetNextWindowPos(ImVec2(margin, margin), ImGuiCond_Always);
929 break;
930 case LAYER_POSITION_TOP_RIGHT:
931 ImGui::SetNextWindowPos(ImVec2(data->width - data->window_size.x - margin, margin),
932 ImGuiCond_Always);
933 break;
934 case LAYER_POSITION_BOTTOM_LEFT:
935 ImGui::SetNextWindowPos(ImVec2(margin, data->height - data->window_size.y - margin),
936 ImGuiCond_Always);
937 break;
938 case LAYER_POSITION_BOTTOM_RIGHT:
939 ImGui::SetNextWindowPos(ImVec2(data->width - data->window_size.x - margin,
940 data->height - data->window_size.y - margin),
941 ImGuiCond_Always);
942 break;
943 }
944 }
945
946 static void compute_swapchain_display(struct swapchain_data *data)
947 {
948 struct device_data *device_data = data->device;
949 struct instance_data *instance_data = device_data->instance;
950
951 ImGui::SetCurrentContext(data->imgui_context);
952 ImGui::NewFrame();
953 position_layer(data);
954 ImGui::Begin("Mesa overlay");
955 if (instance_data->params.enabled[OVERLAY_PARAM_ENABLED_device])
956 ImGui::Text("Device: %s", device_data->properties.deviceName);
957
958 if (instance_data->params.enabled[OVERLAY_PARAM_ENABLED_format]) {
959 const char *format_name = vk_Format_to_str(data->format);
960 format_name = format_name ? (format_name + strlen("VK_FORMAT_")) : "unknown";
961 ImGui::Text("Swapchain format: %s", format_name);
962 }
963 if (instance_data->params.enabled[OVERLAY_PARAM_ENABLED_frame])
964 ImGui::Text("Frames: %" PRIu64, data->n_frames);
965 if (instance_data->params.enabled[OVERLAY_PARAM_ENABLED_fps])
966 ImGui::Text("FPS: %.2f" , data->fps);
967
968 /* Recompute min/max */
969 for (uint32_t s = 0; s < OVERLAY_PARAM_ENABLED_MAX; s++) {
970 data->stats_min.stats[s] = UINT64_MAX;
971 data->stats_max.stats[s] = 0;
972 }
973 for (uint32_t f = 0; f < MIN2(data->n_frames, ARRAY_SIZE(data->frames_stats)); f++) {
974 for (uint32_t s = 0; s < OVERLAY_PARAM_ENABLED_MAX; s++) {
975 data->stats_min.stats[s] = MIN2(data->frames_stats[f].stats[s],
976 data->stats_min.stats[s]);
977 data->stats_max.stats[s] = MAX2(data->frames_stats[f].stats[s],
978 data->stats_max.stats[s]);
979 }
980 }
981 for (uint32_t s = 0; s < OVERLAY_PARAM_ENABLED_MAX; s++) {
982 assert(data->stats_min.stats[s] != UINT64_MAX);
983 }
984
985 for (uint32_t s = 0; s < OVERLAY_PARAM_ENABLED_MAX; s++) {
986 if (!instance_data->params.enabled[s] ||
987 s == OVERLAY_PARAM_ENABLED_device ||
988 s == OVERLAY_PARAM_ENABLED_format ||
989 s == OVERLAY_PARAM_ENABLED_fps ||
990 s == OVERLAY_PARAM_ENABLED_frame)
991 continue;
992
993 char hash[40];
994 snprintf(hash, sizeof(hash), "##%s", overlay_param_names[s]);
995 data->stat_selector = (enum overlay_param_enabled) s;
996 data->time_dividor = 1000.0f;
997 if (s == OVERLAY_PARAM_ENABLED_gpu_timing)
998 data->time_dividor = 1000000.0f;
999
1000 if (s == OVERLAY_PARAM_ENABLED_frame_timing ||
1001 s == OVERLAY_PARAM_ENABLED_acquire_timing ||
1002 s == OVERLAY_PARAM_ENABLED_present_timing ||
1003 s == OVERLAY_PARAM_ENABLED_gpu_timing) {
1004 double min_time = data->stats_min.stats[s] / data->time_dividor;
1005 double max_time = data->stats_max.stats[s] / data->time_dividor;
1006 ImGui::PlotHistogram(hash, get_time_stat, data,
1007 ARRAY_SIZE(data->frames_stats), 0,
1008 NULL, min_time, max_time,
1009 ImVec2(ImGui::GetContentRegionAvailWidth(), 30));
1010 ImGui::Text("%s: %.3fms [%.3f, %.3f]", overlay_param_names[s],
1011 get_time_stat(data, ARRAY_SIZE(data->frames_stats) - 1),
1012 min_time, max_time);
1013 } else {
1014 ImGui::PlotHistogram(hash, get_stat, data,
1015 ARRAY_SIZE(data->frames_stats), 0,
1016 NULL,
1017 data->stats_min.stats[s],
1018 data->stats_max.stats[s],
1019 ImVec2(ImGui::GetContentRegionAvailWidth(), 30));
1020 ImGui::Text("%s: %.0f [%" PRIu64 ", %" PRIu64 "]", overlay_param_names[s],
1021 get_stat(data, ARRAY_SIZE(data->frames_stats) - 1),
1022 data->stats_min.stats[s], data->stats_max.stats[s]);
1023 }
1024 }
1025 data->window_size = ImVec2(data->window_size.x, ImGui::GetCursorPosY() + 10.0f);
1026 ImGui::End();
1027 ImGui::EndFrame();
1028 ImGui::Render();
1029 }
1030
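/* Finds a memory type that has all the requested property flags and is
 * allowed by type_bits, or returns 0xFFFFFFFF if none matches.
 */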
1031 static uint32_t vk_memory_type(struct device_data *data,
1032 VkMemoryPropertyFlags properties,
1033 uint32_t type_bits)
1034 {
1035 VkPhysicalDeviceMemoryProperties prop;
1036 data->instance->pd_vtable.GetPhysicalDeviceMemoryProperties(data->physical_device, &prop);
1037 for (uint32_t i = 0; i < prop.memoryTypeCount; i++)
1038 if ((prop.memoryTypes[i].propertyFlags & properties) == properties && type_bits & (1<<i))
1039 return i;
1040 return 0xFFFFFFFF; // Unable to find memoryType
1041 }
1042
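/* One-time upload of the ImGui font atlas: the RGBA32 texels are copied into
 * a host-visible staging buffer, then a buffer-to-image copy plus the
 * required layout transitions are recorded into the given command buffer.
 */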
1043 static void ensure_swapchain_fonts(struct swapchain_data *data,
1044 VkCommandBuffer command_buffer)
1045 {
1046 if (data->font_uploaded)
1047 return;
1048
1049 data->font_uploaded = true;
1050
1051 struct device_data *device_data = data->device;
1052 ImGuiIO& io = ImGui::GetIO();
1053 unsigned char* pixels;
1054 int width, height;
1055 io.Fonts->GetTexDataAsRGBA32(&pixels, &width, &height);
1056 size_t upload_size = width * height * 4 * sizeof(char);
1057
1058 /* Upload buffer */
1059 VkBufferCreateInfo buffer_info = {};
1060 buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
1061 buffer_info.size = upload_size;
1062 buffer_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
1063 buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
1064 VK_CHECK(device_data->vtable.CreateBuffer(device_data->device, &buffer_info,
1065 NULL, &data->upload_font_buffer));
1066 VkMemoryRequirements upload_buffer_req;
1067 device_data->vtable.GetBufferMemoryRequirements(device_data->device,
1068 data->upload_font_buffer,
1069 &upload_buffer_req);
1070 VkMemoryAllocateInfo upload_alloc_info = {};
1071 upload_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
1072 upload_alloc_info.allocationSize = upload_buffer_req.size;
1073 upload_alloc_info.memoryTypeIndex = vk_memory_type(device_data,
1074 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
1075 upload_buffer_req.memoryTypeBits);
1076 VK_CHECK(device_data->vtable.AllocateMemory(device_data->device,
1077 &upload_alloc_info,
1078 NULL,
1079 &data->upload_font_buffer_mem));
1080 VK_CHECK(device_data->vtable.BindBufferMemory(device_data->device,
1081 data->upload_font_buffer,
1082 data->upload_font_buffer_mem, 0));
1083
1084 /* Upload to Buffer */
1085 char* map = NULL;
1086 VK_CHECK(device_data->vtable.MapMemory(device_data->device,
1087 data->upload_font_buffer_mem,
1088 0, upload_size, 0, (void**)(&map)));
1089 memcpy(map, pixels, upload_size);
1090 VkMappedMemoryRange range[1] = {};
1091 range[0].sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
1092 range[0].memory = data->upload_font_buffer_mem;
1093 range[0].size = upload_size;
1094 VK_CHECK(device_data->vtable.FlushMappedMemoryRanges(device_data->device, 1, range));
1095 device_data->vtable.UnmapMemory(device_data->device,
1096 data->upload_font_buffer_mem);
1097
1098 /* Copy buffer to image */
1099 VkImageMemoryBarrier copy_barrier[1] = {};
1100 copy_barrier[0].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
1101 copy_barrier[0].dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
1102 copy_barrier[0].oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
1103 copy_barrier[0].newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
1104 copy_barrier[0].srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
1105 copy_barrier[0].dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
1106 copy_barrier[0].image = data->font_image;
1107 copy_barrier[0].subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1108 copy_barrier[0].subresourceRange.levelCount = 1;
1109 copy_barrier[0].subresourceRange.layerCount = 1;
1110 device_data->vtable.CmdPipelineBarrier(command_buffer,
1111 VK_PIPELINE_STAGE_HOST_BIT,
1112 VK_PIPELINE_STAGE_TRANSFER_BIT,
1113 0, 0, NULL, 0, NULL,
1114 1, copy_barrier);
1115
1116 VkBufferImageCopy region = {};
1117 region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1118 region.imageSubresource.layerCount = 1;
1119 region.imageExtent.width = width;
1120 region.imageExtent.height = height;
1121 region.imageExtent.depth = 1;
1122 device_data->vtable.CmdCopyBufferToImage(command_buffer,
1123 data->upload_font_buffer,
1124 data->font_image,
1125 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1126 1, &region);
1127
1128 VkImageMemoryBarrier use_barrier[1] = {};
1129 use_barrier[0].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
1130 use_barrier[0].srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
1131 use_barrier[0].dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
1132 use_barrier[0].oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
1133 use_barrier[0].newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
1134 use_barrier[0].srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
1135 use_barrier[0].dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
1136 use_barrier[0].image = data->font_image;
1137 use_barrier[0].subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1138 use_barrier[0].subresourceRange.levelCount = 1;
1139 use_barrier[0].subresourceRange.layerCount = 1;
1140 device_data->vtable.CmdPipelineBarrier(command_buffer,
1141 VK_PIPELINE_STAGE_TRANSFER_BIT,
1142 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
1143 0,
1144 0, NULL,
1145 0, NULL,
1146 1, use_barrier);
1147
1148 /* Store our identifier */
1149 io.Fonts->TexID = (ImTextureID)(intptr_t)data->font_image;
1150 }
1151
1152 static void CreateOrResizeBuffer(struct device_data *data,
1153 VkBuffer *buffer,
1154 VkDeviceMemory *buffer_memory,
1155 VkDeviceSize *buffer_size,
1156 size_t new_size, VkBufferUsageFlagBits usage)
1157 {
1158 if (*buffer != VK_NULL_HANDLE)
1159 data->vtable.DestroyBuffer(data->device, *buffer, NULL);
1160 if (*buffer_memory)
1161 data->vtable.FreeMemory(data->device, *buffer_memory, NULL);
1162
1163 VkBufferCreateInfo buffer_info = {};
1164 buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
1165 buffer_info.size = new_size;
1166 buffer_info.usage = usage;
1167 buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
1168 VK_CHECK(data->vtable.CreateBuffer(data->device, &buffer_info, NULL, buffer));
1169
1170 VkMemoryRequirements req;
1171 data->vtable.GetBufferMemoryRequirements(data->device, *buffer, &req);
1172 VkMemoryAllocateInfo alloc_info = {};
1173 alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
1174 alloc_info.allocationSize = req.size;
1175 alloc_info.memoryTypeIndex =
1176 vk_memory_type(data, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, req.memoryTypeBits);
1177 VK_CHECK(data->vtable.AllocateMemory(data->device, &alloc_info, NULL, buffer_memory));
1178
1179 VK_CHECK(data->vtable.BindBufferMemory(data->device, *buffer, *buffer_memory, 0));
1180 *buffer_size = new_size;
1181 }
1182
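/* Records and submits the command buffer drawing the ImGui data on top of the
 * swapchain image about to be presented. Returns the overlay_draw whose
 * semaphore the caller should wait on before presenting, or NULL when there
 * is nothing to draw.
 */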
1183 static struct overlay_draw *render_swapchain_display(struct swapchain_data *data,
1184 struct queue_data *present_queue,
1185 const VkSemaphore *wait_semaphores,
1186 unsigned n_wait_semaphores,
1187 unsigned image_index)
1188 {
1189 ImDrawData* draw_data = ImGui::GetDrawData();
1190 if (draw_data->TotalVtxCount == 0)
1191 return NULL;
1192
1193 struct device_data *device_data = data->device;
1194 struct overlay_draw *draw = get_overlay_draw(data);
1195
1196 device_data->vtable.ResetCommandBuffer(draw->command_buffer, 0);
1197
1198 VkRenderPassBeginInfo render_pass_info = {};
1199 render_pass_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
1200 render_pass_info.renderPass = data->render_pass;
1201 render_pass_info.framebuffer = data->framebuffers[image_index];
1202 render_pass_info.renderArea.extent.width = data->width;
1203 render_pass_info.renderArea.extent.height = data->height;
1204
1205 VkCommandBufferBeginInfo buffer_begin_info = {};
1206 buffer_begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
1207
1208 device_data->vtable.BeginCommandBuffer(draw->command_buffer, &buffer_begin_info);
1209
1210 ensure_swapchain_fonts(data, draw->command_buffer);
1211
1212 /* Transition the image to be presented back to the color-attachment layout
1213 * so the overlay can be rendered on top of it.
1214 */
1215 VkImageMemoryBarrier imb;
1216 imb.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
1217 imb.pNext = nullptr;
1218 imb.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
1219 imb.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
1220 imb.oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
1221 imb.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
1222 imb.image = data->images[image_index];
1223 imb.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1224 imb.subresourceRange.baseMipLevel = 0;
1225 imb.subresourceRange.levelCount = 1;
1226 imb.subresourceRange.baseArrayLayer = 0;
1227 imb.subresourceRange.layerCount = 1;
1228 imb.srcQueueFamilyIndex = present_queue->family_index;
1229 imb.dstQueueFamilyIndex = device_data->graphic_queue->family_index;
1230 device_data->vtable.CmdPipelineBarrier(draw->command_buffer,
1231 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
1232 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
1233 0, /* dependency flags */
1234 0, nullptr, /* memory barriers */
1235 0, nullptr, /* buffer memory barriers */
1236 1, &imb); /* image memory barriers */
1237
1238 device_data->vtable.CmdBeginRenderPass(draw->command_buffer, &render_pass_info,
1239 VK_SUBPASS_CONTENTS_INLINE);
1240
1241 /* Create/Resize vertex & index buffers */
1242 size_t vertex_size = align_uintptr(draw_data->TotalVtxCount * sizeof(ImDrawVert), device_data->properties.limits.nonCoherentAtomSize);
1243 size_t index_size = align_uintptr(draw_data->TotalIdxCount * sizeof(ImDrawIdx), device_data->properties.limits.nonCoherentAtomSize);
1244 if (draw->vertex_buffer_size < vertex_size) {
1245 CreateOrResizeBuffer(device_data,
1246 &draw->vertex_buffer,
1247 &draw->vertex_buffer_mem,
1248 &draw->vertex_buffer_size,
1249 vertex_size, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
1250 }
1251 if (draw->index_buffer_size < index_size) {
1252 CreateOrResizeBuffer(device_data,
1253 &draw->index_buffer,
1254 &draw->index_buffer_mem,
1255 &draw->index_buffer_size,
1256 index_size, VK_BUFFER_USAGE_INDEX_BUFFER_BIT);
1257 }
1258
1259 /* Upload vertex & index data */
1260 ImDrawVert* vtx_dst = NULL;
1261 ImDrawIdx* idx_dst = NULL;
1262 VK_CHECK(device_data->vtable.MapMemory(device_data->device, draw->vertex_buffer_mem,
1263 0, vertex_size, 0, (void**)(&vtx_dst)));
1264 VK_CHECK(device_data->vtable.MapMemory(device_data->device, draw->index_buffer_mem,
1265 0, index_size, 0, (void**)(&idx_dst)));
1266 for (int n = 0; n < draw_data->CmdListsCount; n++)
1267 {
1268 const ImDrawList* cmd_list = draw_data->CmdLists[n];
1269 memcpy(vtx_dst, cmd_list->VtxBuffer.Data, cmd_list->VtxBuffer.Size * sizeof(ImDrawVert));
1270 memcpy(idx_dst, cmd_list->IdxBuffer.Data, cmd_list->IdxBuffer.Size * sizeof(ImDrawIdx));
1271 vtx_dst += cmd_list->VtxBuffer.Size;
1272 idx_dst += cmd_list->IdxBuffer.Size;
1273 }
1274 VkMappedMemoryRange range[2] = {};
1275 range[0].sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
1276 range[0].memory = draw->vertex_buffer_mem;
1277 range[0].size = VK_WHOLE_SIZE;
1278 range[1].sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
1279 range[1].memory = draw->index_buffer_mem;
1280 range[1].size = VK_WHOLE_SIZE;
1281 VK_CHECK(device_data->vtable.FlushMappedMemoryRanges(device_data->device, 2, range));
1282 device_data->vtable.UnmapMemory(device_data->device, draw->vertex_buffer_mem);
1283 device_data->vtable.UnmapMemory(device_data->device, draw->index_buffer_mem);
1284
1285 /* Bind pipeline and descriptor sets */
1286 device_data->vtable.CmdBindPipeline(draw->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, data->pipeline);
1287 VkDescriptorSet desc_set[1] = { data->descriptor_set };
1288 device_data->vtable.CmdBindDescriptorSets(draw->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
1289 data->pipeline_layout, 0, 1, desc_set, 0, NULL);
1290
1291 /* Bind vertex & index buffers */
1292 VkBuffer vertex_buffers[1] = { draw->vertex_buffer };
1293 VkDeviceSize vertex_offset[1] = { 0 };
1294 device_data->vtable.CmdBindVertexBuffers(draw->command_buffer, 0, 1, vertex_buffers, vertex_offset);
1295 device_data->vtable.CmdBindIndexBuffer(draw->command_buffer, draw->index_buffer, 0, VK_INDEX_TYPE_UINT16);
1296
1297 /* Setup viewport */
1298 VkViewport viewport;
1299 viewport.x = 0;
1300 viewport.y = 0;
1301 viewport.width = draw_data->DisplaySize.x;
1302 viewport.height = draw_data->DisplaySize.y;
1303 viewport.minDepth = 0.0f;
1304 viewport.maxDepth = 1.0f;
1305 device_data->vtable.CmdSetViewport(draw->command_buffer, 0, 1, &viewport);
1306
1307
1308 /* Set up scale and translation through push constants:
1309 *
1310 * Our visible imgui space lies from draw_data->DisplayPos (top left) to
1311 * draw_data->DisplayPos + draw_data->DisplaySize (bottom right). DisplayMin
1312 * is typically (0,0) for single viewport apps.
1313 */
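/* A quick check of the mapping (assuming the vertex shader computes
 * pos * scale + translate): with scale = 2 / DisplaySize and
 * translate = -1 - DisplayPos * scale, a vertex at DisplayPos maps to -1 and a
 * vertex at DisplayPos + DisplaySize maps to
 * (DisplayPos + DisplaySize) * 2 / DisplaySize - 1 - DisplayPos * 2 / DisplaySize = +1,
 * i.e. the visible imgui rectangle exactly covers clip space.
 */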
1314 float scale[2];
1315 scale[0] = 2.0f / draw_data->DisplaySize.x;
1316 scale[1] = 2.0f / draw_data->DisplaySize.y;
1317 float translate[2];
1318 translate[0] = -1.0f - draw_data->DisplayPos.x * scale[0];
1319 translate[1] = -1.0f - draw_data->DisplayPos.y * scale[1];
1320 device_data->vtable.CmdPushConstants(draw->command_buffer, data->pipeline_layout,
1321 VK_SHADER_STAGE_VERTEX_BIT,
1322 sizeof(float) * 0, sizeof(float) * 2, scale);
1323 device_data->vtable.CmdPushConstants(draw->command_buffer, data->pipeline_layout,
1324 VK_SHADER_STAGE_VERTEX_BIT,
1325 sizeof(float) * 2, sizeof(float) * 2, translate);
1326
1327 // Render the command lists:
1328 int vtx_offset = 0;
1329 int idx_offset = 0;
1330 ImVec2 display_pos = draw_data->DisplayPos;
1331 for (int n = 0; n < draw_data->CmdListsCount; n++)
1332 {
1333 const ImDrawList* cmd_list = draw_data->CmdLists[n];
1334 for (int cmd_i = 0; cmd_i < cmd_list->CmdBuffer.Size; cmd_i++)
1335 {
1336 const ImDrawCmd* pcmd = &cmd_list->CmdBuffer[cmd_i];
1337 // Apply scissor/clipping rectangle
1338 // FIXME: We could clamp width/height based on clamped min/max values.
1339 VkRect2D scissor;
1340 scissor.offset.x = (int32_t)(pcmd->ClipRect.x - display_pos.x) > 0 ? (int32_t)(pcmd->ClipRect.x - display_pos.x) : 0;
1341 scissor.offset.y = (int32_t)(pcmd->ClipRect.y - display_pos.y) > 0 ? (int32_t)(pcmd->ClipRect.y - display_pos.y) : 0;
1342 scissor.extent.width = (uint32_t)(pcmd->ClipRect.z - pcmd->ClipRect.x);
1343 scissor.extent.height = (uint32_t)(pcmd->ClipRect.w - pcmd->ClipRect.y + 1); // FIXME: Why +1 here?
1344 device_data->vtable.CmdSetScissor(draw->command_buffer, 0, 1, &scissor);
1345
1346 // Draw
1347 device_data->vtable.CmdDrawIndexed(draw->command_buffer, pcmd->ElemCount, 1, idx_offset, vtx_offset, 0);
1348
1349 idx_offset += pcmd->ElemCount;
1350 }
1351 vtx_offset += cmd_list->VtxBuffer.Size;
1352 }
1353
1354 device_data->vtable.CmdEndRenderPass(draw->command_buffer);
1355
1356 if (device_data->graphic_queue->family_index != present_queue->family_index)
1357 {
1358 /* Transfer the image back to the present queue family; the image layout
1359 * was already changed to PRESENT_SRC by the render pass.
1360 */
1361 imb.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
1362 imb.pNext = nullptr;
1363 imb.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
1364 imb.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
1365 imb.oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
1366 imb.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
1367 imb.image = data->images[image_index];
1368 imb.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1369 imb.subresourceRange.baseMipLevel = 0;
1370 imb.subresourceRange.levelCount = 1;
1371 imb.subresourceRange.baseArrayLayer = 0;
1372 imb.subresourceRange.layerCount = 1;
1373 imb.srcQueueFamilyIndex = device_data->graphic_queue->family_index;
1374 imb.dstQueueFamilyIndex = present_queue->family_index;
1375 device_data->vtable.CmdPipelineBarrier(draw->command_buffer,
1376 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
1377 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
1378 0, /* dependency flags */
1379 0, nullptr, /* memory barriers */
1380 0, nullptr, /* buffer memory barriers */
1381 1, &imb); /* image memory barriers */
1382 }
1383
1384 device_data->vtable.EndCommandBuffer(draw->command_buffer);
1385
1386 /* When presenting on a different queue than where we're drawing the
1387 * overlay *AND* when the application does not provide a semaphore to
1388 * vkQueuePresent, insert our own cross engine synchronization
1389 * semaphore.
1390 */
1391 if (n_wait_semaphores == 0 && device_data->graphic_queue->queue != present_queue->queue) {
1392 VkPipelineStageFlags stages_wait = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
1393 VkSubmitInfo submit_info = {};
1394 submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
1395 submit_info.commandBufferCount = 0;
1396 submit_info.pWaitDstStageMask = &stages_wait;
1397 submit_info.waitSemaphoreCount = 0;
1398 submit_info.signalSemaphoreCount = 1;
1399 submit_info.pSignalSemaphores = &draw->cross_engine_semaphore;
1400
1401 device_data->vtable.QueueSubmit(present_queue->queue, 1, &submit_info, VK_NULL_HANDLE);
1402
1403 submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
1404 submit_info.commandBufferCount = 1;
1405 submit_info.pWaitDstStageMask = &stages_wait;
1406 submit_info.pCommandBuffers = &draw->command_buffer;
1407 submit_info.waitSemaphoreCount = 1;
1408 submit_info.pWaitSemaphores = &draw->cross_engine_semaphore;
1409 submit_info.signalSemaphoreCount = 1;
1410 submit_info.pSignalSemaphores = &draw->semaphore;
1411
1412 device_data->vtable.QueueSubmit(device_data->graphic_queue->queue, 1, &submit_info, draw->fence);
1413 } else {
1414 VkPipelineStageFlags *stages_wait = (VkPipelineStageFlags*) malloc(sizeof(VkPipelineStageFlags) * n_wait_semaphores);
1415 for (unsigned i = 0; i < n_wait_semaphores; i++)
1416 {
1417 // wait in the fragment stage until the swapchain image is ready
1418 stages_wait[i] = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
1419 }
1420
1421 VkSubmitInfo submit_info = {};
1422 submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
1423 submit_info.commandBufferCount = 1;
1424 submit_info.pCommandBuffers = &draw->command_buffer;
1425 submit_info.pWaitDstStageMask = stages_wait;
1426 submit_info.waitSemaphoreCount = n_wait_semaphores;
1427 submit_info.pWaitSemaphores = wait_semaphores;
1428 submit_info.signalSemaphoreCount = 1;
1429 submit_info.pSignalSemaphores = &draw->semaphore;
1430
1431 device_data->vtable.QueueSubmit(device_data->graphic_queue->queue, 1, &submit_info, draw->fence);
1432
1433 free(stages_wait);
1434 }
1435
1436 return draw;
1437 }
1438
1439 static const uint32_t overlay_vert_spv[] = {
1440 #include "overlay.vert.spv.h"
1441 };
1442 static const uint32_t overlay_frag_spv[] = {
1443 #include "overlay.frag.spv.h"
1444 };
1445
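/* Builds the Vulkan objects used to draw the overlay: shader modules from the
 * embedded SPIR-V, the font sampler, descriptor pool/layout/set, a pipeline
 * layout carrying the scale/translate push constants, and the graphics
 * pipeline state.
 */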
1446 static void setup_swapchain_data_pipeline(struct swapchain_data *data)
1447 {
1448 struct device_data *device_data = data->device;
1449 VkShaderModule vert_module, frag_module;
1450
1451 /* Create shader modules */
1452 VkShaderModuleCreateInfo vert_info = {};
1453 vert_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
1454 vert_info.codeSize = sizeof(overlay_vert_spv);
1455 vert_info.pCode = overlay_vert_spv;
1456 VK_CHECK(device_data->vtable.CreateShaderModule(device_data->device,
1457 &vert_info, NULL, &vert_module));
1458 VkShaderModuleCreateInfo frag_info = {};
1459 frag_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
1460 frag_info.codeSize = sizeof(overlay_frag_spv);
1461 frag_info.pCode = (uint32_t*)overlay_frag_spv;
1462 VK_CHECK(device_data->vtable.CreateShaderModule(device_data->device,
1463 &frag_info, NULL, &frag_module));
1464
1465 /* Font sampler */
1466 VkSamplerCreateInfo sampler_info = {};
1467 sampler_info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
1468 sampler_info.magFilter = VK_FILTER_LINEAR;
1469 sampler_info.minFilter = VK_FILTER_LINEAR;
1470 sampler_info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
1471 sampler_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
1472 sampler_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
1473 sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
1474 sampler_info.minLod = -1000;
1475 sampler_info.maxLod = 1000;
1476 sampler_info.maxAnisotropy = 1.0f;
1477 VK_CHECK(device_data->vtable.CreateSampler(device_data->device, &sampler_info,
1478 NULL, &data->font_sampler));
1479
1480 /* Descriptor pool */
1481 VkDescriptorPoolSize sampler_pool_size = {};
1482 sampler_pool_size.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
1483 sampler_pool_size.descriptorCount = 1;
1484 VkDescriptorPoolCreateInfo desc_pool_info = {};
1485 desc_pool_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
1486 desc_pool_info.maxSets = 1;
1487 desc_pool_info.poolSizeCount = 1;
1488 desc_pool_info.pPoolSizes = &sampler_pool_size;
1489 VK_CHECK(device_data->vtable.CreateDescriptorPool(device_data->device,
1490 &desc_pool_info,
1491 NULL, &data->descriptor_pool));
1492
1493 /* Descriptor layout */
1494 VkSampler sampler[1] = { data->font_sampler };
1495 VkDescriptorSetLayoutBinding binding[1] = {};
1496 binding[0].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
1497 binding[0].descriptorCount = 1;
1498 binding[0].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
1499 binding[0].pImmutableSamplers = sampler;
1500 VkDescriptorSetLayoutCreateInfo set_layout_info = {};
1501 set_layout_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
1502 set_layout_info.bindingCount = 1;
1503 set_layout_info.pBindings = binding;
1504 VK_CHECK(device_data->vtable.CreateDescriptorSetLayout(device_data->device,
1505 &set_layout_info,
1506 NULL, &data->descriptor_layout));
1507
1508 /* Descriptor set */
1509 VkDescriptorSetAllocateInfo alloc_info = {};
1510 alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
1511 alloc_info.descriptorPool = data->descriptor_pool;
1512 alloc_info.descriptorSetCount = 1;
1513 alloc_info.pSetLayouts = &data->descriptor_layout;
1514 VK_CHECK(device_data->vtable.AllocateDescriptorSets(device_data->device,
1515 &alloc_info,
1516 &data->descriptor_set));
1517
1518 /* Constants: we are using 'vec2 offset' and 'vec2 scale' instead of a full
1519 * 3d projection matrix
1520 */
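/* Not shown in this file: the vertex shader is presumably expected to apply
 * these as pos * scale + offset, which is why 4 floats are enough below.
 */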
1521 VkPushConstantRange push_constants[1] = {};
1522 push_constants[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
1523 push_constants[0].offset = sizeof(float) * 0;
1524 push_constants[0].size = sizeof(float) * 4;
1525 VkPipelineLayoutCreateInfo layout_info = {};
1526 layout_info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
1527 layout_info.setLayoutCount = 1;
1528 layout_info.pSetLayouts = &data->descriptor_layout;
1529 layout_info.pushConstantRangeCount = 1;
1530 layout_info.pPushConstantRanges = push_constants;
1531 VK_CHECK(device_data->vtable.CreatePipelineLayout(device_data->device,
1532 &layout_info,
1533 NULL, &data->pipeline_layout));
1534
1535 VkPipelineShaderStageCreateInfo stage[2] = {};
1536 stage[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
1537 stage[0].stage = VK_SHADER_STAGE_VERTEX_BIT;
1538 stage[0].module = vert_module;
1539 stage[0].pName = "main";
1540 stage[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
1541 stage[1].stage = VK_SHADER_STAGE_FRAGMENT_BIT;
1542 stage[1].module = frag_module;
1543 stage[1].pName = "main";
1544
1545 VkVertexInputBindingDescription binding_desc[1] = {};
1546 binding_desc[0].stride = sizeof(ImDrawVert);
1547 binding_desc[0].inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
1548
1549 VkVertexInputAttributeDescription attribute_desc[3] = {};
1550 attribute_desc[0].location = 0;
1551 attribute_desc[0].binding = binding_desc[0].binding;
1552 attribute_desc[0].format = VK_FORMAT_R32G32_SFLOAT;
1553 attribute_desc[0].offset = IM_OFFSETOF(ImDrawVert, pos);
1554 attribute_desc[1].location = 1;
1555 attribute_desc[1].binding = binding_desc[0].binding;
1556 attribute_desc[1].format = VK_FORMAT_R32G32_SFLOAT;
1557 attribute_desc[1].offset = IM_OFFSETOF(ImDrawVert, uv);
1558 attribute_desc[2].location = 2;
1559 attribute_desc[2].binding = binding_desc[0].binding;
1560 attribute_desc[2].format = VK_FORMAT_R8G8B8A8_UNORM;
1561 attribute_desc[2].offset = IM_OFFSETOF(ImDrawVert, col);
1562
1563 VkPipelineVertexInputStateCreateInfo vertex_info = {};
1564 vertex_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
1565 vertex_info.vertexBindingDescriptionCount = 1;
1566 vertex_info.pVertexBindingDescriptions = binding_desc;
1567 vertex_info.vertexAttributeDescriptionCount = 3;
1568 vertex_info.pVertexAttributeDescriptions = attribute_desc;
1569
1570 VkPipelineInputAssemblyStateCreateInfo ia_info = {};
1571 ia_info.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
1572 ia_info.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
1573
1574 VkPipelineViewportStateCreateInfo viewport_info = {};
1575 viewport_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
1576 viewport_info.viewportCount = 1;
1577 viewport_info.scissorCount = 1;
1578
1579 VkPipelineRasterizationStateCreateInfo raster_info = {};
1580 raster_info.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
1581 raster_info.polygonMode = VK_POLYGON_MODE_FILL;
1582 raster_info.cullMode = VK_CULL_MODE_NONE;
1583 raster_info.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
1584 raster_info.lineWidth = 1.0f;
1585
1586 VkPipelineMultisampleStateCreateInfo ms_info = {};
1587 ms_info.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
1588 ms_info.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
1589
1590 VkPipelineColorBlendAttachmentState color_attachment[1] = {};
1591 color_attachment[0].blendEnable = VK_TRUE;
1592 color_attachment[0].srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA;
1593 color_attachment[0].dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
1594 color_attachment[0].colorBlendOp = VK_BLEND_OP_ADD;
1595 color_attachment[0].srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
1596 color_attachment[0].dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
1597 color_attachment[0].alphaBlendOp = VK_BLEND_OP_ADD;
1598 color_attachment[0].colorWriteMask = VK_COLOR_COMPONENT_R_BIT |
1599 VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
1600
1601 VkPipelineDepthStencilStateCreateInfo depth_info = {};
1602 depth_info.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
1603
1604 VkPipelineColorBlendStateCreateInfo blend_info = {};
1605 blend_info.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
1606 blend_info.attachmentCount = 1;
1607 blend_info.pAttachments = color_attachment;
1608
1609 VkDynamicState dynamic_states[2] = { VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR };
1610 VkPipelineDynamicStateCreateInfo dynamic_state = {};
1611 dynamic_state.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
1612 dynamic_state.dynamicStateCount = (uint32_t)IM_ARRAYSIZE(dynamic_states);
1613 dynamic_state.pDynamicStates = dynamic_states;
1614
1615 VkGraphicsPipelineCreateInfo info = {};
1616 info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
1617 info.flags = 0;
1618 info.stageCount = 2;
1619 info.pStages = stage;
1620 info.pVertexInputState = &vertex_info;
1621 info.pInputAssemblyState = &ia_info;
1622 info.pViewportState = &viewport_info;
1623 info.pRasterizationState = &raster_info;
1624 info.pMultisampleState = &ms_info;
1625 info.pDepthStencilState = &depth_info;
1626 info.pColorBlendState = &blend_info;
1627 info.pDynamicState = &dynamic_state;
1628 info.layout = data->pipeline_layout;
1629 info.renderPass = data->render_pass;
1630 VK_CHECK(
1631 device_data->vtable.CreateGraphicsPipelines(device_data->device, VK_NULL_HANDLE,
1632 1, &info,
1633 NULL, &data->pipeline));
1634
1635 device_data->vtable.DestroyShaderModule(device_data->device, vert_module, NULL);
1636 device_data->vtable.DestroyShaderModule(device_data->device, frag_module, NULL);
1637
1638 ImGuiIO& io = ImGui::GetIO();
1639 unsigned char* pixels;
1640 int width, height;
1641 io.Fonts->GetTexDataAsRGBA32(&pixels, &width, &height);
1642
1643 /* Font image */
1644 VkImageCreateInfo image_info = {};
1645 image_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
1646 image_info.imageType = VK_IMAGE_TYPE_2D;
1647 image_info.format = VK_FORMAT_R8G8B8A8_UNORM;
1648 image_info.extent.width = width;
1649 image_info.extent.height = height;
1650 image_info.extent.depth = 1;
1651 image_info.mipLevels = 1;
1652 image_info.arrayLayers = 1;
1653 image_info.samples = VK_SAMPLE_COUNT_1_BIT;
1654 image_info.tiling = VK_IMAGE_TILING_OPTIMAL;
1655 image_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
1656 image_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
1657 image_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
1658 VK_CHECK(device_data->vtable.CreateImage(device_data->device, &image_info,
1659 NULL, &data->font_image));
1660 VkMemoryRequirements font_image_req;
1661 device_data->vtable.GetImageMemoryRequirements(device_data->device,
1662 data->font_image, &font_image_req);
1663 VkMemoryAllocateInfo image_alloc_info = {};
1664 image_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
1665 image_alloc_info.allocationSize = font_image_req.size;
1666 image_alloc_info.memoryTypeIndex = vk_memory_type(device_data,
1667 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
1668 font_image_req.memoryTypeBits);
1669 VK_CHECK(device_data->vtable.AllocateMemory(device_data->device, &image_alloc_info,
1670 NULL, &data->font_mem));
1671 VK_CHECK(device_data->vtable.BindImageMemory(device_data->device,
1672 data->font_image,
1673 data->font_mem, 0));
1674
1675 /* Font image view */
1676 VkImageViewCreateInfo view_info = {};
1677 view_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
1678 view_info.image = data->font_image;
1679 view_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
1680 view_info.format = VK_FORMAT_R8G8B8A8_UNORM;
1681 view_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1682 view_info.subresourceRange.levelCount = 1;
1683 view_info.subresourceRange.layerCount = 1;
1684 VK_CHECK(device_data->vtable.CreateImageView(device_data->device, &view_info,
1685 NULL, &data->font_image_view));
1686
1687 /* Descriptor set */
1688 VkDescriptorImageInfo desc_image[1] = {};
1689 desc_image[0].sampler = data->font_sampler;
1690 desc_image[0].imageView = data->font_image_view;
1691 desc_image[0].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
1692 VkWriteDescriptorSet write_desc[1] = {};
1693 write_desc[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
1694 write_desc[0].dstSet = data->descriptor_set;
1695 write_desc[0].descriptorCount = 1;
1696 write_desc[0].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
1697 write_desc[0].pImageInfo = desc_image;
1698 device_data->vtable.UpdateDescriptorSets(device_data->device, 1, write_desc, 0, NULL);
1699 }
1700
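/* Per-swapchain setup: an ImGui context sized to the swapchain, a render
 * pass that loads the application's rendering and finishes in
 * PRESENT_SRC_KHR layout, the overlay pipeline, plus an image view and
 * framebuffer for each swapchain image and a command buffer pool.
 */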
1701 static void setup_swapchain_data(struct swapchain_data *data,
1702 const VkSwapchainCreateInfoKHR *pCreateInfo)
1703 {
1704 data->width = pCreateInfo->imageExtent.width;
1705 data->height = pCreateInfo->imageExtent.height;
1706 data->format = pCreateInfo->imageFormat;
1707
1708 data->imgui_context = ImGui::CreateContext();
1709 ImGui::SetCurrentContext(data->imgui_context);
1710
1711 ImGui::GetIO().IniFilename = NULL;
1712 ImGui::GetIO().DisplaySize = ImVec2((float)data->width, (float)data->height);
1713
1714 struct device_data *device_data = data->device;
1715
1716 /* Render pass */
1717 VkAttachmentDescription attachment_desc = {};
1718 attachment_desc.format = pCreateInfo->imageFormat;
1719 attachment_desc.samples = VK_SAMPLE_COUNT_1_BIT;
1720 attachment_desc.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
1721 attachment_desc.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
1722 attachment_desc.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
1723 attachment_desc.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
1724 attachment_desc.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
1725 attachment_desc.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
1726 VkAttachmentReference color_attachment = {};
1727 color_attachment.attachment = 0;
1728 color_attachment.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
1729 VkSubpassDescription subpass = {};
1730 subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
1731 subpass.colorAttachmentCount = 1;
1732 subpass.pColorAttachments = &color_attachment;
1733 VkSubpassDependency dependency = {};
1734 dependency.srcSubpass = VK_SUBPASS_EXTERNAL;
1735 dependency.dstSubpass = 0;
1736 dependency.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
1737 dependency.dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
1738 dependency.srcAccessMask = 0;
1739 dependency.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
1740 VkRenderPassCreateInfo render_pass_info = {};
1741 render_pass_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
1742 render_pass_info.attachmentCount = 1;
1743 render_pass_info.pAttachments = &attachment_desc;
1744 render_pass_info.subpassCount = 1;
1745 render_pass_info.pSubpasses = &subpass;
1746 render_pass_info.dependencyCount = 1;
1747 render_pass_info.pDependencies = &dependency;
1748 VK_CHECK(device_data->vtable.CreateRenderPass(device_data->device,
1749 &render_pass_info,
1750 NULL, &data->render_pass));
1751
1752 setup_swapchain_data_pipeline(data);
1753
1754 VK_CHECK(device_data->vtable.GetSwapchainImagesKHR(device_data->device,
1755 data->swapchain,
1756 &data->n_images,
1757 NULL));
1758
1759 data->images = ralloc_array(data, VkImage, data->n_images);
1760 data->image_views = ralloc_array(data, VkImageView, data->n_images);
1761 data->framebuffers = ralloc_array(data, VkFramebuffer, data->n_images);
1762
1763 VK_CHECK(device_data->vtable.GetSwapchainImagesKHR(device_data->device,
1764 data->swapchain,
1765 &data->n_images,
1766 data->images));
1767
1768 /* Image views */
1769 VkImageViewCreateInfo view_info = {};
1770 view_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
1771 view_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
1772 view_info.format = pCreateInfo->imageFormat;
1773 view_info.components.r = VK_COMPONENT_SWIZZLE_R;
1774 view_info.components.g = VK_COMPONENT_SWIZZLE_G;
1775 view_info.components.b = VK_COMPONENT_SWIZZLE_B;
1776 view_info.components.a = VK_COMPONENT_SWIZZLE_A;
1777 view_info.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
1778 for (uint32_t i = 0; i < data->n_images; i++) {
1779 view_info.image = data->images[i];
1780 VK_CHECK(device_data->vtable.CreateImageView(device_data->device,
1781 &view_info, NULL,
1782 &data->image_views[i]));
1783 }
1784
1785 /* Framebuffers */
1786 VkImageView attachment[1];
1787 VkFramebufferCreateInfo fb_info = {};
1788 fb_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
1789 fb_info.renderPass = data->render_pass;
1790 fb_info.attachmentCount = 1;
1791 fb_info.pAttachments = attachment;
1792 fb_info.width = data->width;
1793 fb_info.height = data->height;
1794 fb_info.layers = 1;
1795 for (uint32_t i = 0; i < data->n_images; i++) {
1796 attachment[0] = data->image_views[i];
1797 VK_CHECK(device_data->vtable.CreateFramebuffer(device_data->device, &fb_info,
1798 NULL, &data->framebuffers[i]));
1799 }
1800
1801 /* Command buffer pool */
1802 VkCommandPoolCreateInfo cmd_buffer_pool_info = {};
1803 cmd_buffer_pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
1804 cmd_buffer_pool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
1805 cmd_buffer_pool_info.queueFamilyIndex = device_data->graphic_queue->family_index;
1806 VK_CHECK(device_data->vtable.CreateCommandPool(device_data->device,
1807 &cmd_buffer_pool_info,
1808 NULL, &data->command_pool));
1809 }
1810
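/* Tears down everything created by setup_swapchain_data() and
 * setup_swapchain_data_pipeline(), including any per-draw resources still
 * sitting in the draws list.
 */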
1811 static void shutdown_swapchain_data(struct swapchain_data *data)
1812 {
1813 struct device_data *device_data = data->device;
1814
1815 list_for_each_entry_safe(struct overlay_draw, draw, &data->draws, link) {
1816 device_data->vtable.DestroySemaphore(device_data->device, draw->cross_engine_semaphore, NULL);
1817 device_data->vtable.DestroySemaphore(device_data->device, draw->semaphore, NULL);
1818 device_data->vtable.DestroyFence(device_data->device, draw->fence, NULL);
1819 device_data->vtable.DestroyBuffer(device_data->device, draw->vertex_buffer, NULL);
1820 device_data->vtable.DestroyBuffer(device_data->device, draw->index_buffer, NULL);
1821 device_data->vtable.FreeMemory(device_data->device, draw->vertex_buffer_mem, NULL);
1822 device_data->vtable.FreeMemory(device_data->device, draw->index_buffer_mem, NULL);
1823 }
1824
1825 for (uint32_t i = 0; i < data->n_images; i++) {
1826 device_data->vtable.DestroyImageView(device_data->device, data->image_views[i], NULL);
1827 device_data->vtable.DestroyFramebuffer(device_data->device, data->framebuffers[i], NULL);
1828 }
1829
1830 device_data->vtable.DestroyRenderPass(device_data->device, data->render_pass, NULL);
1831
1832 device_data->vtable.DestroyCommandPool(device_data->device, data->command_pool, NULL);
1833
1834 device_data->vtable.DestroyPipeline(device_data->device, data->pipeline, NULL);
1835 device_data->vtable.DestroyPipelineLayout(device_data->device, data->pipeline_layout, NULL);
1836
1837 device_data->vtable.DestroyDescriptorPool(device_data->device,
1838 data->descriptor_pool, NULL);
1839 device_data->vtable.DestroyDescriptorSetLayout(device_data->device,
1840 data->descriptor_layout, NULL);
1841
1842 device_data->vtable.DestroySampler(device_data->device, data->font_sampler, NULL);
1843 device_data->vtable.DestroyImageView(device_data->device, data->font_image_view, NULL);
1844 device_data->vtable.DestroyImage(device_data->device, data->font_image, NULL);
1845 device_data->vtable.FreeMemory(device_data->device, data->font_mem, NULL);
1846
1847 device_data->vtable.DestroyBuffer(device_data->device, data->upload_font_buffer, NULL);
1848 device_data->vtable.FreeMemory(device_data->device, data->upload_font_buffer_mem, NULL);
1849
1850 ImGui::DestroyContext(data->imgui_context);
1851 }
1852
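/* Runs right before the real vkQueuePresentKHR: snapshots the frame
 * statistics and, unless display is disabled, renders the overlay on top of
 * the image about to be presented. Returns the overlay draw (if any) so the
 * caller can wait on its semaphore.
 */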
1853 static struct overlay_draw *before_present(struct swapchain_data *swapchain_data,
1854 struct queue_data *present_queue,
1855 const VkSemaphore *wait_semaphores,
1856 unsigned n_wait_semaphores,
1857 unsigned imageIndex)
1858 {
1859 struct instance_data *instance_data = swapchain_data->device->instance;
1860 struct overlay_draw *draw = NULL;
1861
1862 snapshot_swapchain_frame(swapchain_data);
1863
1864 if (!instance_data->params.no_display && swapchain_data->n_frames > 0) {
1865 compute_swapchain_display(swapchain_data);
1866 draw = render_swapchain_display(swapchain_data, present_queue,
1867 wait_semaphores, n_wait_semaphores,
1868 imageIndex);
1869 }
1870
1871 return draw;
1872 }
1873
1874 static VkResult overlay_CreateSwapchainKHR(
1875 VkDevice device,
1876 const VkSwapchainCreateInfoKHR* pCreateInfo,
1877 const VkAllocationCallbacks* pAllocator,
1878 VkSwapchainKHR* pSwapchain)
1879 {
1880 struct device_data *device_data = FIND(struct device_data, device);
1881 VkResult result = device_data->vtable.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
1882 if (result != VK_SUCCESS) return result;
1883
1884 struct swapchain_data *swapchain_data = new_swapchain_data(*pSwapchain, device_data);
1885 setup_swapchain_data(swapchain_data, pCreateInfo);
1886 return result;
1887 }
1888
1889 static void overlay_DestroySwapchainKHR(
1890 VkDevice device,
1891 VkSwapchainKHR swapchain,
1892 const VkAllocationCallbacks* pAllocator)
1893 {
1894 if (swapchain == VK_NULL_HANDLE) {
1895 struct device_data *device_data = FIND(struct device_data, device);
1896 device_data->vtable.DestroySwapchainKHR(device, swapchain, pAllocator);
1897 return;
1898 }
1899
1900 struct swapchain_data *swapchain_data =
1901 FIND(struct swapchain_data, swapchain);
1902
1903 shutdown_swapchain_data(swapchain_data);
1904 swapchain_data->device->vtable.DestroySwapchainKHR(device, swapchain, pAllocator);
1905 destroy_swapchain_data(swapchain_data);
1906 }
1907
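/* The main per-frame hook: collects pipeline statistics and GPU timestamps
 * from every command buffer submitted since the last present, then presents
 * each swapchain individually so present timing can be measured per
 * swapchain and the overlay draw's semaphore can be waited upon.
 */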
1908 static VkResult overlay_QueuePresentKHR(
1909 VkQueue queue,
1910 const VkPresentInfoKHR* pPresentInfo)
1911 {
1912 struct queue_data *queue_data = FIND(struct queue_data, queue);
1913 struct device_data *device_data = queue_data->device;
1914 struct instance_data *instance_data = device_data->instance;
1915 uint32_t query_results[OVERLAY_QUERY_COUNT];
1916
1917 device_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_frame]++;
1918
1919 if (list_length(&queue_data->running_command_buffer) > 0) {
1920 /* Before getting the query results, make sure the operations have
1921 * completed.
1922 */
1923 VK_CHECK(device_data->vtable.ResetFences(device_data->device,
1924 1, &queue_data->queries_fence));
1925 VK_CHECK(device_data->vtable.QueueSubmit(queue, 0, NULL, queue_data->queries_fence));
1926 VK_CHECK(device_data->vtable.WaitForFences(device_data->device,
1927 1, &queue_data->queries_fence,
1928 VK_FALSE, UINT64_MAX));
1929
1930 /* Now get the results. */
1931 list_for_each_entry_safe(struct command_buffer_data, cmd_buffer_data,
1932 &queue_data->running_command_buffer, link) {
1933 list_delinit(&cmd_buffer_data->link);
1934
1935 if (cmd_buffer_data->pipeline_query_pool) {
1936 memset(query_results, 0, sizeof(query_results));
1937 VK_CHECK(device_data->vtable.GetQueryPoolResults(device_data->device,
1938 cmd_buffer_data->pipeline_query_pool,
1939 cmd_buffer_data->query_index, 1,
1940 sizeof(uint32_t) * OVERLAY_QUERY_COUNT,
1941 query_results, 0, VK_QUERY_RESULT_WAIT_BIT));
1942
1943 for (uint32_t i = OVERLAY_PARAM_ENABLED_vertices;
1944 i <= OVERLAY_PARAM_ENABLED_compute_invocations; i++) {
1945 device_data->frame_stats.stats[i] += query_results[i - OVERLAY_PARAM_ENABLED_vertices];
1946 }
1947 }
1948 if (cmd_buffer_data->timestamp_query_pool) {
1949 uint64_t gpu_timestamps[2] = { 0 };
1950 VK_CHECK(device_data->vtable.GetQueryPoolResults(device_data->device,
1951 cmd_buffer_data->timestamp_query_pool,
1952 cmd_buffer_data->query_index * 2, 2,
1953 2 * sizeof(uint64_t), gpu_timestamps, sizeof(uint64_t),
1954 VK_QUERY_RESULT_WAIT_BIT | VK_QUERY_RESULT_64_BIT));
1955
1956 gpu_timestamps[0] &= queue_data->timestamp_mask;
1957 gpu_timestamps[1] &= queue_data->timestamp_mask;
1958 device_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_gpu_timing] +=
1959 (gpu_timestamps[1] - gpu_timestamps[0]) *
1960 device_data->properties.limits.timestampPeriod;
1961 }
1962 }
1963 }
1964
1965 /* Otherwise we need to add our overlay drawing semaphore to the list of
1966 * semaphores to wait on. If we don't do that, the presented picture might
1967 * have incomplete overlay drawings.
1968 */
1969 VkResult result = VK_SUCCESS;
1970 if (instance_data->params.no_display) {
1971 for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) {
1972 VkSwapchainKHR swapchain = pPresentInfo->pSwapchains[i];
1973 struct swapchain_data *swapchain_data =
1974 FIND(struct swapchain_data, swapchain);
1975
1976 uint32_t image_index = pPresentInfo->pImageIndices[i];
1977
1978 before_present(swapchain_data,
1979 queue_data,
1980 pPresentInfo->pWaitSemaphores,
1981 pPresentInfo->waitSemaphoreCount,
1982 image_index);
1983
1984 VkPresentInfoKHR present_info = *pPresentInfo;
1985 present_info.swapchainCount = 1;
1986 present_info.pSwapchains = &swapchain;
1987 present_info.pImageIndices = &image_index;
1988
1989 uint64_t ts0 = os_time_get();
1990 result = queue_data->device->vtable.QueuePresentKHR(queue, &present_info);
1991 uint64_t ts1 = os_time_get();
1992 swapchain_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_present_timing] += ts1 - ts0;
1993 }
1994 } else {
1995 for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) {
1996 VkSwapchainKHR swapchain = pPresentInfo->pSwapchains[i];
1997 struct swapchain_data *swapchain_data =
1998 FIND(struct swapchain_data, swapchain);
1999
2000 uint32_t image_index = pPresentInfo->pImageIndices[i];
2001
2002 VkPresentInfoKHR present_info = *pPresentInfo;
2003 present_info.swapchainCount = 1;
2004 present_info.pSwapchains = &swapchain;
2005 present_info.pImageIndices = &image_index;
2006
2007 struct overlay_draw *draw = before_present(swapchain_data,
2008 queue_data,
2009 pPresentInfo->pWaitSemaphores,
2010 pPresentInfo->waitSemaphoreCount,
2011 image_index);
2012
2013 /* Because the submission of the overlay draw waits on the semaphores
2014 * handed in for present, we don't need to have this present operation
2015 * wait on them as well; we can just wait on the overlay submission
2016 * semaphore.
2017 */
2018 present_info.pWaitSemaphores = &draw->semaphore;
2019 present_info.waitSemaphoreCount = 1;
2020
2021 uint64_t ts0 = os_time_get();
2022 VkResult chain_result = queue_data->device->vtable.QueuePresentKHR(queue, &present_info);
2023 uint64_t ts1 = os_time_get();
2024 swapchain_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_present_timing] += ts1 - ts0;
2025 if (pPresentInfo->pResults)
2026 pPresentInfo->pResults[i] = chain_result;
2027 if (chain_result != VK_SUCCESS && result == VK_SUCCESS)
2028 result = chain_result;
2029 }
2030 }
2031 return result;
2032 }
2033
2034 static VkResult overlay_AcquireNextImageKHR(
2035 VkDevice device,
2036 VkSwapchainKHR swapchain,
2037 uint64_t timeout,
2038 VkSemaphore semaphore,
2039 VkFence fence,
2040 uint32_t* pImageIndex)
2041 {
2042 struct swapchain_data *swapchain_data =
2043 FIND(struct swapchain_data, swapchain);
2044 struct device_data *device_data = swapchain_data->device;
2045
2046 uint64_t ts0 = os_time_get();
2047 VkResult result = device_data->vtable.AcquireNextImageKHR(device, swapchain, timeout,
2048 semaphore, fence, pImageIndex);
2049 uint64_t ts1 = os_time_get();
2050
2051 swapchain_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_acquire_timing] += ts1 - ts0;
2052 swapchain_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_acquire]++;
2053
2054 return result;
2055 }
2056
2057 static VkResult overlay_AcquireNextImage2KHR(
2058 VkDevice device,
2059 const VkAcquireNextImageInfoKHR* pAcquireInfo,
2060 uint32_t* pImageIndex)
2061 {
2062 struct swapchain_data *swapchain_data =
2063 FIND(struct swapchain_data, pAcquireInfo->swapchain);
2064 struct device_data *device_data = swapchain_data->device;
2065
2066 uint64_t ts0 = os_time_get();
2067 VkResult result = device_data->vtable.AcquireNextImage2KHR(device, pAcquireInfo, pImageIndex);
2068 uint64_t ts1 = os_time_get();
2069
2070 swapchain_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_acquire_timing] += ts1 - ts0;
2071 swapchain_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_acquire]++;
2072
2073 return result;
2074 }
2075
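/* The vkCmd* hooks below just bump the matching per-command-buffer counter
 * and forward the call to the driver.
 */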
2076 static void overlay_CmdDraw(
2077 VkCommandBuffer commandBuffer,
2078 uint32_t vertexCount,
2079 uint32_t instanceCount,
2080 uint32_t firstVertex,
2081 uint32_t firstInstance)
2082 {
2083 struct command_buffer_data *cmd_buffer_data =
2084 FIND(struct command_buffer_data, commandBuffer);
2085 cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_draw]++;
2086 struct device_data *device_data = cmd_buffer_data->device;
2087 device_data->vtable.CmdDraw(commandBuffer, vertexCount, instanceCount,
2088 firstVertex, firstInstance);
2089 }
2090
2091 static void overlay_CmdDrawIndexed(
2092 VkCommandBuffer commandBuffer,
2093 uint32_t indexCount,
2094 uint32_t instanceCount,
2095 uint32_t firstIndex,
2096 int32_t vertexOffset,
2097 uint32_t firstInstance)
2098 {
2099 struct command_buffer_data *cmd_buffer_data =
2100 FIND(struct command_buffer_data, commandBuffer);
2101 cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_draw_indexed]++;
2102 struct device_data *device_data = cmd_buffer_data->device;
2103 device_data->vtable.CmdDrawIndexed(commandBuffer, indexCount, instanceCount,
2104 firstIndex, vertexOffset, firstInstance);
2105 }
2106
2107 static void overlay_CmdDrawIndirect(
2108 VkCommandBuffer commandBuffer,
2109 VkBuffer buffer,
2110 VkDeviceSize offset,
2111 uint32_t drawCount,
2112 uint32_t stride)
2113 {
2114 struct command_buffer_data *cmd_buffer_data =
2115 FIND(struct command_buffer_data, commandBuffer);
2116 cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_draw_indirect]++;
2117 struct device_data *device_data = cmd_buffer_data->device;
2118 device_data->vtable.CmdDrawIndirect(commandBuffer, buffer, offset, drawCount, stride);
2119 }
2120
2121 static void overlay_CmdDrawIndexedIndirect(
2122 VkCommandBuffer commandBuffer,
2123 VkBuffer buffer,
2124 VkDeviceSize offset,
2125 uint32_t drawCount,
2126 uint32_t stride)
2127 {
2128 struct command_buffer_data *cmd_buffer_data =
2129 FIND(struct command_buffer_data, commandBuffer);
2130 cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_draw_indexed_indirect]++;
2131 struct device_data *device_data = cmd_buffer_data->device;
2132 device_data->vtable.CmdDrawIndexedIndirect(commandBuffer, buffer, offset, drawCount, stride);
2133 }
2134
2135 static void overlay_CmdDrawIndirectCount(
2136 VkCommandBuffer commandBuffer,
2137 VkBuffer buffer,
2138 VkDeviceSize offset,
2139 VkBuffer countBuffer,
2140 VkDeviceSize countBufferOffset,
2141 uint32_t maxDrawCount,
2142 uint32_t stride)
2143 {
2144 struct command_buffer_data *cmd_buffer_data =
2145 FIND(struct command_buffer_data, commandBuffer);
2146 cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_draw_indirect_count]++;
2147 struct device_data *device_data = cmd_buffer_data->device;
2148 device_data->vtable.CmdDrawIndirectCount(commandBuffer, buffer, offset,
2149 countBuffer, countBufferOffset,
2150 maxDrawCount, stride);
2151 }
2152
2153 static void overlay_CmdDrawIndexedIndirectCount(
2154 VkCommandBuffer commandBuffer,
2155 VkBuffer buffer,
2156 VkDeviceSize offset,
2157 VkBuffer countBuffer,
2158 VkDeviceSize countBufferOffset,
2159 uint32_t maxDrawCount,
2160 uint32_t stride)
2161 {
2162 struct command_buffer_data *cmd_buffer_data =
2163 FIND(struct command_buffer_data, commandBuffer);
2164 cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_draw_indexed_indirect_count]++;
2165 struct device_data *device_data = cmd_buffer_data->device;
2166 device_data->vtable.CmdDrawIndexedIndirectCount(commandBuffer, buffer, offset,
2167 countBuffer, countBufferOffset,
2168 maxDrawCount, stride);
2169 }
2170
2171 static void overlay_CmdDispatch(
2172 VkCommandBuffer commandBuffer,
2173 uint32_t groupCountX,
2174 uint32_t groupCountY,
2175 uint32_t groupCountZ)
2176 {
2177 struct command_buffer_data *cmd_buffer_data =
2178 FIND(struct command_buffer_data, commandBuffer);
2179 cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_dispatch]++;
2180 struct device_data *device_data = cmd_buffer_data->device;
2181 device_data->vtable.CmdDispatch(commandBuffer, groupCountX, groupCountY, groupCountZ);
2182 }
2183
2184 static void overlay_CmdDispatchIndirect(
2185 VkCommandBuffer commandBuffer,
2186 VkBuffer buffer,
2187 VkDeviceSize offset)
2188 {
2189 struct command_buffer_data *cmd_buffer_data =
2190 FIND(struct command_buffer_data, commandBuffer);
2191 cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_dispatch_indirect]++;
2192 struct device_data *device_data = cmd_buffer_data->device;
2193 device_data->vtable.CmdDispatchIndirect(commandBuffer, buffer, offset);
2194 }
2195
2196 static void overlay_CmdBindPipeline(
2197 VkCommandBuffer commandBuffer,
2198 VkPipelineBindPoint pipelineBindPoint,
2199 VkPipeline pipeline)
2200 {
2201 struct command_buffer_data *cmd_buffer_data =
2202 FIND(struct command_buffer_data, commandBuffer);
2203 switch (pipelineBindPoint) {
2204 case VK_PIPELINE_BIND_POINT_GRAPHICS: cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_pipeline_graphics]++; break;
2205 case VK_PIPELINE_BIND_POINT_COMPUTE: cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_pipeline_compute]++; break;
2206 case VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR: cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_pipeline_raytracing]++; break;
2207 default: break;
2208 }
2209 struct device_data *device_data = cmd_buffer_data->device;
2210 device_data->vtable.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
2211 }
2212
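/* For primary command buffers, query resets/begins are recorded as the
 * first commands. For secondary command buffers, only the inheritance info
 * is patched so the inherited pipeline statistics match what the layer
 * queries.
 */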
2213 static VkResult overlay_BeginCommandBuffer(
2214 VkCommandBuffer commandBuffer,
2215 const VkCommandBufferBeginInfo* pBeginInfo)
2216 {
2217 struct command_buffer_data *cmd_buffer_data =
2218 FIND(struct command_buffer_data, commandBuffer);
2219 struct device_data *device_data = cmd_buffer_data->device;
2220
2221 memset(&cmd_buffer_data->stats, 0, sizeof(cmd_buffer_data->stats));
2222
2223 /* We don't record any queries in secondary command buffers; just make sure
2224 * we have the right inheritance.
2225 */
2226 if (cmd_buffer_data->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
2227 VkCommandBufferBeginInfo begin_info = *pBeginInfo;
2228
2229 struct VkBaseOutStructure *new_pnext =
2230 clone_chain((const struct VkBaseInStructure *)pBeginInfo->pNext);
2231 VkCommandBufferInheritanceInfo inhe_info;
2232
2233 /* If there was no pNext chain given or we managed to copy it, we can
2234 * add our stuff in there.
2235 *
2236 * Otherwise, keep the old pointer. We failed to copy the pNext chain,
2237 * meaning there is an unknown extension somewhere in there.
2238 */
2239 if (new_pnext || pBeginInfo->pNext == NULL) {
2240 begin_info.pNext = new_pnext;
2241
2242 VkCommandBufferInheritanceInfo *parent_inhe_info = (VkCommandBufferInheritanceInfo *)
2243 vk_find_struct(new_pnext, COMMAND_BUFFER_INHERITANCE_INFO);
2244 inhe_info = (VkCommandBufferInheritanceInfo) {
2245 VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
2246 NULL,
2247 VK_NULL_HANDLE,
2248 0,
2249 VK_NULL_HANDLE,
2250 VK_FALSE,
2251 0,
2252 overlay_query_flags,
2253 };
2254
2255 if (parent_inhe_info)
2256 parent_inhe_info->pipelineStatistics = overlay_query_flags;
2257 else
2258 __vk_append_struct(&begin_info, &inhe_info);
2259 }
2260
2261 VkResult result = device_data->vtable.BeginCommandBuffer(
2262 commandBuffer, &begin_info);
2263
2264 free_chain(new_pnext);
2265
2266 return result;
2267 }
2268
2269 /* Otherwise record the query resets/begins as the first commands. */
2270 VkResult result = device_data->vtable.BeginCommandBuffer(commandBuffer, pBeginInfo);
2271
2272 if (result == VK_SUCCESS) {
2273 if (cmd_buffer_data->pipeline_query_pool) {
2274 device_data->vtable.CmdResetQueryPool(commandBuffer,
2275 cmd_buffer_data->pipeline_query_pool,
2276 cmd_buffer_data->query_index, 1);
2277 }
2278 if (cmd_buffer_data->timestamp_query_pool) {
2279 device_data->vtable.CmdResetQueryPool(commandBuffer,
2280 cmd_buffer_data->timestamp_query_pool,
2281 cmd_buffer_data->query_index * 2, 2);
2282 }
2283 if (cmd_buffer_data->pipeline_query_pool) {
2284 device_data->vtable.CmdBeginQuery(commandBuffer,
2285 cmd_buffer_data->pipeline_query_pool,
2286 cmd_buffer_data->query_index, 0);
2287 }
2288 if (cmd_buffer_data->timestamp_query_pool) {
2289 device_data->vtable.CmdWriteTimestamp(commandBuffer,
2290 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
2291 cmd_buffer_data->timestamp_query_pool,
2292 cmd_buffer_data->query_index * 2);
2293 }
2294 }
2295
2296 return result;
2297 }
2298
2299 static VkResult overlay_EndCommandBuffer(
2300 VkCommandBuffer commandBuffer)
2301 {
2302 struct command_buffer_data *cmd_buffer_data =
2303 FIND(struct command_buffer_data, commandBuffer);
2304 struct device_data *device_data = cmd_buffer_data->device;
2305
2306 if (cmd_buffer_data->timestamp_query_pool) {
2307 device_data->vtable.CmdWriteTimestamp(commandBuffer,
2308 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
2309 cmd_buffer_data->timestamp_query_pool,
2310 cmd_buffer_data->query_index * 2 + 1);
2311 }
2312 if (cmd_buffer_data->pipeline_query_pool) {
2313 device_data->vtable.CmdEndQuery(commandBuffer,
2314 cmd_buffer_data->pipeline_query_pool,
2315 cmd_buffer_data->query_index);
2316 }
2317
2318 return device_data->vtable.EndCommandBuffer(commandBuffer);
2319 }
2320
2321 static VkResult overlay_ResetCommandBuffer(
2322 VkCommandBuffer commandBuffer,
2323 VkCommandBufferResetFlags flags)
2324 {
2325 struct command_buffer_data *cmd_buffer_data =
2326 FIND(struct command_buffer_data, commandBuffer);
2327 struct device_data *device_data = cmd_buffer_data->device;
2328
2329 memset(&cmd_buffer_data->stats, 0, sizeof(cmd_buffer_data->stats));
2330
2331 return device_data->vtable.ResetCommandBuffer(commandBuffer, flags);
2332 }
2333
2334 static void overlay_CmdExecuteCommands(
2335 VkCommandBuffer commandBuffer,
2336 uint32_t commandBufferCount,
2337 const VkCommandBuffer* pCommandBuffers)
2338 {
2339 struct command_buffer_data *cmd_buffer_data =
2340 FIND(struct command_buffer_data, commandBuffer);
2341 struct device_data *device_data = cmd_buffer_data->device;
2342
2343 /* Add the stats of the executed command buffers to the primary one. */
2344 for (uint32_t c = 0; c < commandBufferCount; c++) {
2345 struct command_buffer_data *sec_cmd_buffer_data =
2346 FIND(struct command_buffer_data, pCommandBuffers[c]);
2347
2348 for (uint32_t s = 0; s < OVERLAY_PARAM_ENABLED_MAX; s++)
2349 cmd_buffer_data->stats.stats[s] += sec_cmd_buffer_data->stats.stats[s];
2350 }
2351
2352 device_data->vtable.CmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers);
2353 }
2354
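/* A single pipeline-statistics query pool and a single timestamp query pool
 * are shared by all command buffers of an allocation (one slot, or two
 * timestamps, per buffer). The pools are reference-counted through the
 * object map and destroyed once the last command buffer using them is
 * freed.
 */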
2355 static VkResult overlay_AllocateCommandBuffers(
2356 VkDevice device,
2357 const VkCommandBufferAllocateInfo* pAllocateInfo,
2358 VkCommandBuffer* pCommandBuffers)
2359 {
2360 struct device_data *device_data = FIND(struct device_data, device);
2361 VkResult result =
2362 device_data->vtable.AllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers);
2363 if (result != VK_SUCCESS)
2364 return result;
2365
2366 VkQueryPool pipeline_query_pool = VK_NULL_HANDLE;
2367 VkQueryPool timestamp_query_pool = VK_NULL_HANDLE;
2368 if (device_data->pipeline_statistics_enabled &&
2369 pAllocateInfo->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
2370 VkQueryPoolCreateInfo pool_info = {
2371 VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
2372 NULL,
2373 0,
2374 VK_QUERY_TYPE_PIPELINE_STATISTICS,
2375 pAllocateInfo->commandBufferCount,
2376 overlay_query_flags,
2377 };
2378 VK_CHECK(device_data->vtable.CreateQueryPool(device_data->device, &pool_info,
2379 NULL, &pipeline_query_pool));
2380 }
2381 if (device_data->instance->params.enabled[OVERLAY_PARAM_ENABLED_gpu_timing]) {
2382 VkQueryPoolCreateInfo pool_info = {
2383 VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
2384 NULL,
2385 0,
2386 VK_QUERY_TYPE_TIMESTAMP,
2387 pAllocateInfo->commandBufferCount * 2,
2388 0,
2389 };
2390 VK_CHECK(device_data->vtable.CreateQueryPool(device_data->device, &pool_info,
2391 NULL, &timestamp_query_pool));
2392 }
2393
2394 for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; i++) {
2395 new_command_buffer_data(pCommandBuffers[i], pAllocateInfo->level,
2396 pipeline_query_pool, timestamp_query_pool,
2397 i, device_data);
2398 }
2399
2400 if (pipeline_query_pool)
2401 map_object(HKEY(pipeline_query_pool), (void *)(uintptr_t) pAllocateInfo->commandBufferCount);
2402 if (timestamp_query_pool)
2403 map_object(HKEY(timestamp_query_pool), (void *)(uintptr_t) pAllocateInfo->commandBufferCount);
2404
2405 return result;
2406 }
2407
2408 static void overlay_FreeCommandBuffers(
2409 VkDevice device,
2410 VkCommandPool commandPool,
2411 uint32_t commandBufferCount,
2412 const VkCommandBuffer* pCommandBuffers)
2413 {
2414 struct device_data *device_data = FIND(struct device_data, device);
2415 for (uint32_t i = 0; i < commandBufferCount; i++) {
2416 struct command_buffer_data *cmd_buffer_data =
2417 FIND(struct command_buffer_data, pCommandBuffers[i]);
2418
2419 /* It is legal to free a NULL command buffer. */
2420 if (!cmd_buffer_data)
2421 continue;
2422
2423 uint64_t count = (uintptr_t)find_object_data(HKEY(cmd_buffer_data->pipeline_query_pool));
2424 if (count == 1) {
2425 unmap_object(HKEY(cmd_buffer_data->pipeline_query_pool));
2426 device_data->vtable.DestroyQueryPool(device_data->device,
2427 cmd_buffer_data->pipeline_query_pool, NULL);
2428 } else if (count != 0) {
2429 map_object(HKEY(cmd_buffer_data->pipeline_query_pool), (void *)(uintptr_t)(count - 1));
2430 }
2431 count = (uintptr_t)find_object_data(HKEY(cmd_buffer_data->timestamp_query_pool));
2432 if (count == 1) {
2433 unmap_object(HKEY(cmd_buffer_data->timestamp_query_pool));
2434 device_data->vtable.DestroyQueryPool(device_data->device,
2435 cmd_buffer_data->timestamp_query_pool, NULL);
2436 } else if (count != 0) {
2437 map_object(HKEY(cmd_buffer_data->timestamp_query_pool), (void *)(uintptr_t)(count - 1));
2438 }
2439 destroy_command_buffer_data(cmd_buffer_data);
2440 }
2441
2442 device_data->vtable.FreeCommandBuffers(device, commandPool,
2443 commandBufferCount, pCommandBuffers);
2444 }
2445
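/* On submit, each command buffer's recorded counters are merged into the
 * device's frame statistics, and command buffers carrying queries are added
 * to the queue's running list so their results are read back at present
 * time.
 */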
2446 static VkResult overlay_QueueSubmit(
2447 VkQueue queue,
2448 uint32_t submitCount,
2449 const VkSubmitInfo* pSubmits,
2450 VkFence fence)
2451 {
2452 struct queue_data *queue_data = FIND(struct queue_data, queue);
2453 struct device_data *device_data = queue_data->device;
2454
2455 device_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_submit]++;
2456
2457 for (uint32_t s = 0; s < submitCount; s++) {
2458 for (uint32_t c = 0; c < pSubmits[s].commandBufferCount; c++) {
2459 struct command_buffer_data *cmd_buffer_data =
2460 FIND(struct command_buffer_data, pSubmits[s].pCommandBuffers[c]);
2461
2462 /* Merge the submitted command buffer stats into the device. */
2463 for (uint32_t st = 0; st < OVERLAY_PARAM_ENABLED_MAX; st++)
2464 device_data->frame_stats.stats[st] += cmd_buffer_data->stats.stats[st];
2465
2466 /* Attach the command buffer to the queue so we remember to read its
2467 * pipeline statistics & timestamps at QueuePresent().
2468 */
2469 if (!cmd_buffer_data->pipeline_query_pool &&
2470 !cmd_buffer_data->timestamp_query_pool)
2471 continue;
2472
2473 if (list_is_empty(&cmd_buffer_data->link)) {
2474 list_addtail(&cmd_buffer_data->link,
2475 &queue_data->running_command_buffer);
2476 } else {
2477 fprintf(stderr, "Command buffer submitted multiple times before present.\n"
2478 "This could lead to invalid data.\n");
2479 }
2480 }
2481 }
2482
2483 return device_data->vtable.QueueSubmit(queue, submitCount, pSubmits, fence);
2484 }
2485
2486 static VkResult overlay_QueueSubmit2(
2487 VkQueue queue,
2488 uint32_t submitCount,
2489 const VkSubmitInfo2* pSubmits,
2490 VkFence fence)
2491 {
2492 struct queue_data *queue_data = FIND(struct queue_data, queue);
2493 struct device_data *device_data = queue_data->device;
2494
2495 device_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_submit]++;
2496
2497 for (uint32_t s = 0; s < submitCount; s++) {
2498 for (uint32_t c = 0; c < pSubmits[s].commandBufferInfoCount; c++) {
2499 struct command_buffer_data *cmd_buffer_data =
2500 FIND(struct command_buffer_data, pSubmits[s].pCommandBufferInfos[c].commandBuffer);
2501
2502 /* Merge the submitted command buffer stats into the device. */
2503 for (uint32_t st = 0; st < OVERLAY_PARAM_ENABLED_MAX; st++)
2504 device_data->frame_stats.stats[st] += cmd_buffer_data->stats.stats[st];
2505
2506 /* Attach the command buffer to the queue so we remember to read its
2507 * pipeline statistics & timestamps at QueuePresent().
2508 */
2509 if (!cmd_buffer_data->pipeline_query_pool &&
2510 !cmd_buffer_data->timestamp_query_pool)
2511 continue;
2512
2513 if (list_is_empty(&cmd_buffer_data->link)) {
2514 list_addtail(&cmd_buffer_data->link,
2515 &queue_data->running_command_buffer);
2516 } else {
2517 fprintf(stderr, "Command buffer submitted multiple times before present.\n"
2518 "This could lead to invalid data.\n");
2519 }
2520 }
2521 }
2522
2523 return device_data->vtable.QueueSubmit2(queue, submitCount, pSubmits, fence);
2524 }
2525
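/* Device creation hook: when pipeline statistics are requested (and the
 * pNext chain could be cloned), the pipelineStatisticsQuery and
 * inheritedQueries features are forced on in a copy of the create info
 * before calling down the chain.
 */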
2526 static VkResult overlay_CreateDevice(
2527 VkPhysicalDevice physicalDevice,
2528 const VkDeviceCreateInfo* pCreateInfo,
2529 const VkAllocationCallbacks* pAllocator,
2530 VkDevice* pDevice)
2531 {
2532 struct instance_data *instance_data =
2533 FIND(struct instance_data, physicalDevice);
2534 VkLayerDeviceCreateInfo *chain_info =
2535 get_device_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
2536
2537 assert(chain_info->u.pLayerInfo);
2538 PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
2539 PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
2540 PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
2541 if (fpCreateDevice == NULL) {
2542 return VK_ERROR_INITIALIZATION_FAILED;
2543 }
2544
2545 // Advance the link info for the next element on the chain
2546 chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
2547
2548 VkPhysicalDeviceFeatures device_features = {};
2549 VkPhysicalDeviceFeatures *device_features_ptr = NULL;
2550
2551 VkDeviceCreateInfo create_info = *pCreateInfo;
2552
2553 struct VkBaseOutStructure *new_pnext =
2554 clone_chain((const struct VkBaseInStructure *) pCreateInfo->pNext);
2555 if (new_pnext != NULL) {
2556 create_info.pNext = new_pnext;
2557
2558 VkPhysicalDeviceFeatures2 *device_features2 = (VkPhysicalDeviceFeatures2 *)
2559 vk_find_struct(new_pnext, PHYSICAL_DEVICE_FEATURES_2);
2560 if (device_features2) {
2561 /* Can't use create_info.pEnabledFeatures when VkPhysicalDeviceFeatures2 is present */
2562 device_features_ptr = &device_features2->features;
2563 } else {
2564 if (create_info.pEnabledFeatures)
2565 device_features = *(create_info.pEnabledFeatures);
2566 device_features_ptr = &device_features;
2567 create_info.pEnabledFeatures = &device_features;
2568 }
2569
2570 if (instance_data->pipeline_statistics_enabled) {
2571 device_features_ptr->inheritedQueries = true;
2572 device_features_ptr->pipelineStatisticsQuery = true;
2573 }
2574 }
2575
2576 VkResult result = fpCreateDevice(physicalDevice, &create_info, pAllocator, pDevice);
2577 free_chain(new_pnext);
2578 if (result != VK_SUCCESS) return result;
2579
2580 struct device_data *device_data = new_device_data(*pDevice, instance_data);
2581 device_data->physical_device = physicalDevice;
2582 vk_device_dispatch_table_load(&device_data->vtable,
2583 fpGetDeviceProcAddr, *pDevice);
2584
2585 instance_data->pd_vtable.GetPhysicalDeviceProperties(device_data->physical_device,
2586 &device_data->properties);
2587
2588 VkLayerDeviceCreateInfo *load_data_info =
2589 get_device_chain_info(pCreateInfo, VK_LOADER_DATA_CALLBACK);
2590 device_data->set_device_loader_data = load_data_info->u.pfnSetDeviceLoaderData;
2591
2592 device_map_queues(device_data, pCreateInfo);
2593
2594 device_data->pipeline_statistics_enabled =
2595 new_pnext != NULL &&
2596 instance_data->pipeline_statistics_enabled;
2597
2598 return result;
2599 }
2600
2601 static void overlay_DestroyDevice(
2602 VkDevice device,
2603 const VkAllocationCallbacks* pAllocator)
2604 {
2605 struct device_data *device_data = FIND(struct device_data, device);
2606 device_unmap_queues(device_data);
2607 device_data->vtable.DestroyDevice(device, pAllocator);
2608 destroy_device_data(device_data);
2609 }
2610
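/* Instance creation hook: loads the dispatch tables, parses the
 * VK_LAYER_MESA_OVERLAY_CONFIG environment variable and decides whether
 * frame capture and pipeline statistics collection are enabled.
 */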
2611 static VkResult overlay_CreateInstance(
2612 const VkInstanceCreateInfo* pCreateInfo,
2613 const VkAllocationCallbacks* pAllocator,
2614 VkInstance* pInstance)
2615 {
2616 VkLayerInstanceCreateInfo *chain_info =
2617 get_instance_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
2618
2619 assert(chain_info->u.pLayerInfo);
2620 PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr =
2621 chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
2622 PFN_vkCreateInstance fpCreateInstance =
2623 (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
2624 if (fpCreateInstance == NULL) {
2625 return VK_ERROR_INITIALIZATION_FAILED;
2626 }
2627
2628 // Advance the link info for the next element on the chain
2629 chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
2630
2631 VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
2632 if (result != VK_SUCCESS) return result;
2633
2634 struct instance_data *instance_data = new_instance_data(*pInstance);
2635 vk_instance_dispatch_table_load(&instance_data->vtable,
2636 fpGetInstanceProcAddr,
2637 instance_data->instance);
2638 vk_physical_device_dispatch_table_load(&instance_data->pd_vtable,
2639 fpGetInstanceProcAddr,
2640 instance_data->instance);
2641 instance_data_map_physical_devices(instance_data, true);
2642
2643 parse_overlay_env(&instance_data->params, getenv("VK_LAYER_MESA_OVERLAY_CONFIG"));
2644
2645 /* If there's no control file, and an output_file was specified, start
2646 * capturing fps data right away.
2647 */
2648 instance_data->capture_enabled =
2649 instance_data->params.output_file && instance_data->params.control == NULL;
2650 instance_data->capture_started = instance_data->capture_enabled;
2651
2652 for (int i = OVERLAY_PARAM_ENABLED_vertices;
2653 i <= OVERLAY_PARAM_ENABLED_compute_invocations; i++) {
2654 if (instance_data->params.enabled[i]) {
2655 instance_data->pipeline_statistics_enabled = true;
2656 break;
2657 }
2658 }
2659
2660 return result;
2661 }
2662
2663 static void overlay_DestroyInstance(
2664 VkInstance instance,
2665 const VkAllocationCallbacks* pAllocator)
2666 {
2667 struct instance_data *instance_data = FIND(struct instance_data, instance);
2668 instance_data_map_physical_devices(instance_data, false);
2669 instance_data->vtable.DestroyInstance(instance, pAllocator);
2670 destroy_instance_data(instance_data);
2671 }
2672
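/* Map of entry point names to the overlay_* implementations above, used by
 * vkGetInstanceProcAddr/vkGetDeviceProcAddr to intercept the calls this
 * layer cares about.
 */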
2673 static const struct {
2674 const char *name;
2675 void *ptr;
2676 } name_to_funcptr_map[] = {
2677 { "vkGetInstanceProcAddr", (void *) vkGetInstanceProcAddr },
2678 { "vkGetDeviceProcAddr", (void *) vkGetDeviceProcAddr },
2679 #define ADD_HOOK(fn) { "vk" # fn, (void *) overlay_ ## fn }
2680 #define ADD_ALIAS_HOOK(alias, fn) { "vk" # alias, (void *) overlay_ ## fn }
2681 ADD_HOOK(AllocateCommandBuffers),
2682 ADD_HOOK(FreeCommandBuffers),
2683 ADD_HOOK(ResetCommandBuffer),
2684 ADD_HOOK(BeginCommandBuffer),
2685 ADD_HOOK(EndCommandBuffer),
2686 ADD_HOOK(CmdExecuteCommands),
2687
2688 ADD_HOOK(CmdDraw),
2689 ADD_HOOK(CmdDrawIndexed),
2690 ADD_HOOK(CmdDrawIndirect),
2691 ADD_HOOK(CmdDrawIndexedIndirect),
2692 ADD_HOOK(CmdDispatch),
2693 ADD_HOOK(CmdDispatchIndirect),
2694 ADD_HOOK(CmdDrawIndirectCount),
2695 ADD_ALIAS_HOOK(CmdDrawIndirectCountKHR, CmdDrawIndirectCount),
2696 ADD_HOOK(CmdDrawIndexedIndirectCount),
2697 ADD_ALIAS_HOOK(CmdDrawIndexedIndirectCountKHR, CmdDrawIndexedIndirectCount),
2698
2699 ADD_HOOK(CmdBindPipeline),
2700
2701 ADD_HOOK(CreateSwapchainKHR),
2702 ADD_HOOK(QueuePresentKHR),
2703 ADD_HOOK(DestroySwapchainKHR),
2704 ADD_HOOK(AcquireNextImageKHR),
2705 ADD_HOOK(AcquireNextImage2KHR),
2706
2707 ADD_HOOK(QueueSubmit),
2708 ADD_HOOK(QueueSubmit2),
2709
2710 ADD_HOOK(CreateDevice),
2711 ADD_HOOK(DestroyDevice),
2712
2713 ADD_HOOK(CreateInstance),
2714 ADD_HOOK(DestroyInstance),
2715 #undef ADD_HOOK
2716 #undef ADD_ALIAS_HOOK
2717 };
2718
2719 static void *find_ptr(const char *name)
2720 {
2721 for (uint32_t i = 0; i < ARRAY_SIZE(name_to_funcptr_map); i++) {
2722 if (strcmp(name, name_to_funcptr_map[i].name) == 0)
2723 return name_to_funcptr_map[i].ptr;
2724 }
2725
2726 return NULL;
2727 }
2728
2729 PUBLIC VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev,
2730 const char *funcName)
2731 {
2732 void *ptr = find_ptr(funcName);
2733 if (ptr) return reinterpret_cast<PFN_vkVoidFunction>(ptr);
2734
2735 if (dev == NULL) return NULL;
2736
2737 struct device_data *device_data = FIND(struct device_data, dev);
2738 if (device_data->vtable.GetDeviceProcAddr == NULL) return NULL;
2739 return device_data->vtable.GetDeviceProcAddr(dev, funcName);
2740 }
2741
2742 PUBLIC VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance,
2743 const char *funcName)
2744 {
2745 void *ptr = find_ptr(funcName);
2746 if (ptr) return reinterpret_cast<PFN_vkVoidFunction>(ptr);
2747
2748 if (instance == NULL) return NULL;
2749
2750 struct instance_data *instance_data = FIND(struct instance_data, instance);
2751 if (instance_data->vtable.GetInstanceProcAddr == NULL) return NULL;
2752 return instance_data->vtable.GetInstanceProcAddr(instance, funcName);
2753 }
2754