/*
 * Copyright 2020 Google LLC
 * SPDX-License-Identifier: MIT
 */

#include "vkr_queue.h"

#include "venus-protocol/vn_protocol_renderer_queue.h"

#include "vkr_context.h"
#include "vkr_physical_device.h"
#include "vkr_queue_gen.h"

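/* Allocate a queue sync for a new fence submission, recycling one from the
 * device's free list when possible so that VkFence objects are reused.  The
 * free list is locked only when VKR_RENDERER_ASYNC_FENCE_CB is set, i.e.
 * when syncs can be returned to the list from the sync thread.
 */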
struct vkr_queue_sync *
vkr_device_alloc_queue_sync(struct vkr_device *dev,
                            uint32_t fence_flags,
                            uint32_t ring_idx,
                            uint64_t fence_id)
{
   struct vn_device_proc_table *vk = &dev->proc_table;
   struct vkr_queue_sync *sync;

   if (vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB)
      mtx_lock(&dev->free_sync_mutex);

   if (LIST_IS_EMPTY(&dev->free_syncs)) {
      if (vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB)
         mtx_unlock(&dev->free_sync_mutex);

      sync = malloc(sizeof(*sync));
      if (!sync)
         return NULL;

      const VkExportFenceCreateInfo export_info = {
         .sType = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO,
         .handleTypes = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT,
      };
      const VkFenceCreateInfo create_info = {
         .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
         .pNext = dev->physical_device->KHR_external_fence_fd ? &export_info : NULL,
      };
      VkResult result =
         vk->CreateFence(dev->base.handle.device, &create_info, NULL, &sync->fence);
      if (result != VK_SUCCESS) {
         free(sync);
         return NULL;
      }
   } else {
      sync = LIST_ENTRY(struct vkr_queue_sync, dev->free_syncs.next, head);
      list_del(&sync->head);

      if (vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB)
         mtx_unlock(&dev->free_sync_mutex);

      vk->ResetFences(dev->base.handle.device, 1, &sync->fence);
   }

   sync->device_lost = false;
   sync->flags = fence_flags;
   sync->ring_idx = ring_idx;
   sync->fence_id = fence_id;

   return sync;
}

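/* Return a sync to the device's free list; the VkFence is kept alive and is
 * reset on the next allocation.
 */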
void
vkr_device_free_queue_sync(struct vkr_device *dev, struct vkr_queue_sync *sync)
{
   if (vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB) {
      mtx_lock(&dev->free_sync_mutex);
      list_addtail(&sync->head, &dev->free_syncs);
      mtx_unlock(&dev->free_sync_mutex);
   } else {
      list_addtail(&sync->head, &dev->free_syncs);
   }
}

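/* Collect the syncs that are ready to be retired by the caller.  With
 * VKR_RENDERER_THREAD_SYNC, the queue thread has already moved signaled
 * syncs to signaled_syncs; otherwise pending_syncs is polled in submission
 * order with vkGetFenceStatus.  Syncs marked
 * VIRGL_RENDERER_FENCE_FLAG_MERGEABLE are freed rather than retired unless
 * they are the last signaled sync.
 */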
void
vkr_queue_get_signaled_syncs(struct vkr_queue *queue,
                             struct list_head *retired_syncs,
                             bool *queue_empty)
{
   struct vkr_device *dev = queue->device;
   struct vn_device_proc_table *vk = &dev->proc_table;
   struct vkr_queue_sync *sync, *tmp;

   assert(!(vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB));

   list_inithead(retired_syncs);

   if (vkr_renderer_flags & VKR_RENDERER_THREAD_SYNC) {
      mtx_lock(&queue->mutex);

      LIST_FOR_EACH_ENTRY_SAFE (sync, tmp, &queue->signaled_syncs, head) {
         if (sync->head.next == &queue->signaled_syncs ||
             !(sync->flags & VIRGL_RENDERER_FENCE_FLAG_MERGEABLE))
            list_addtail(&sync->head, retired_syncs);
         else
            vkr_device_free_queue_sync(dev, sync);
      }
      list_inithead(&queue->signaled_syncs);

      *queue_empty = LIST_IS_EMPTY(&queue->pending_syncs);

      mtx_unlock(&queue->mutex);
   } else {
      LIST_FOR_EACH_ENTRY_SAFE (sync, tmp, &queue->pending_syncs, head) {
         if (!sync->device_lost) {
            VkResult result = vk->GetFenceStatus(dev->base.handle.device, sync->fence);
            if (result == VK_NOT_READY)
               break;
         }

         bool is_last_sync = sync->head.next == &queue->pending_syncs;

         list_del(&sync->head);
         if (is_last_sync || !(sync->flags & VIRGL_RENDERER_FENCE_FLAG_MERGEABLE))
            list_addtail(&sync->head, retired_syncs);
         else
            vkr_device_free_queue_sync(dev, sync);
      }

      *queue_empty = LIST_IS_EMPTY(&queue->pending_syncs);
   }
}

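/* Retire one sync: with the async fence callback, invoke fence_retire
 * directly and recycle the sync; otherwise destroy the VkFence and park the
 * sync on the context, to be reported at the next retire_fences.
 */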
static void
vkr_queue_sync_retire(struct vkr_context *ctx,
                      struct vkr_device *dev,
                      struct vkr_queue_sync *sync)
{
   struct vn_device_proc_table *vk = &dev->proc_table;

   if (vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB) {
      ctx->base.fence_retire(&ctx->base, sync->ring_idx, sync->fence_id);
      vkr_device_free_queue_sync(dev, sync);
   } else {
      vk->DestroyFence(dev->base.handle.device, sync->fence, NULL);
      sync->fence = VK_NULL_HANDLE;

      /* move to the ctx to be retired and freed at the next retire_fences */
      list_addtail(&sync->head, &ctx->signaled_syncs);
   }
}

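/* Drain and retire every sync on the queue, joining the sync thread first
 * when VKR_RENDERER_THREAD_SYNC is set.
 */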
static void
vkr_queue_retire_all_syncs(struct vkr_context *ctx, struct vkr_queue *queue)
{
   struct vkr_queue_sync *sync, *tmp;

   if (vkr_renderer_flags & VKR_RENDERER_THREAD_SYNC) {
      mtx_lock(&queue->mutex);
      queue->join = true;
      mtx_unlock(&queue->mutex);

      cnd_signal(&queue->cond);
      thrd_join(queue->thread, NULL);

      LIST_FOR_EACH_ENTRY_SAFE (sync, tmp, &queue->signaled_syncs, head)
         vkr_queue_sync_retire(ctx, queue->device, sync);
   } else {
      assert(LIST_IS_EMPTY(&queue->signaled_syncs));
   }

   LIST_FOR_EACH_ENTRY_SAFE (sync, tmp, &queue->pending_syncs, head)
      vkr_queue_sync_retire(ctx, queue->device, sync);
}

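/* Destroy a queue object.  The caller is expected to have idled the device,
 * so every remaining sync can be retired immediately.
 */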
void
vkr_queue_destroy(struct vkr_context *ctx, struct vkr_queue *queue)
{
   /* vkDeviceWaitIdle has been called */
   vkr_queue_retire_all_syncs(ctx, queue);

   mtx_destroy(&queue->mutex);
   cnd_destroy(&queue->cond);

   list_del(&queue->busy_head);
   list_del(&queue->base.track_head);

   if (queue->ring_idx > 0)
      ctx->sync_queues[queue->ring_idx] = NULL;

   if (queue->base.id)
      vkr_context_remove_object(ctx, &queue->base);
   else
      free(queue);
}

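/* Sync thread body: wait on the oldest pending fence with a 3-second timeout
 * so the join flag is re-checked periodically.  A signaled (or lost) sync is
 * either retired through the async fence callback or moved to signaled_syncs,
 * with the eventfd written to notify the renderer.
 */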
static int
vkr_queue_thread(void *arg)
{
   struct vkr_queue *queue = arg;
   struct vkr_context *ctx = queue->context;
   struct vkr_device *dev = queue->device;
   struct vn_device_proc_table *vk = &dev->proc_table;
   const uint64_t ns_per_sec = 1000000000llu;
   char thread_name[16];

   snprintf(thread_name, ARRAY_SIZE(thread_name), "vkr-queue-%d", ctx->base.ctx_id);
   u_thread_setname(thread_name);

   mtx_lock(&queue->mutex);
   while (true) {
      while (LIST_IS_EMPTY(&queue->pending_syncs) && !queue->join)
         cnd_wait(&queue->cond, &queue->mutex);

      if (queue->join)
         break;

      struct vkr_queue_sync *sync =
         LIST_ENTRY(struct vkr_queue_sync, queue->pending_syncs.next, head);

      mtx_unlock(&queue->mutex);

      VkResult result;
      if (sync->device_lost) {
         result = VK_ERROR_DEVICE_LOST;
      } else {
         result = vk->WaitForFences(dev->base.handle.device, 1, &sync->fence, true,
                                    ns_per_sec * 3);
      }

      mtx_lock(&queue->mutex);

      if (result == VK_TIMEOUT)
         continue;

      list_del(&sync->head);

      if (vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB) {
         ctx->base.fence_retire(&ctx->base, sync->ring_idx, sync->fence_id);
         vkr_device_free_queue_sync(queue->device, sync);
      } else {
         list_addtail(&sync->head, &queue->signaled_syncs);
         write_eventfd(queue->eventfd, 1);
      }
   }
   mtx_unlock(&queue->mutex);

   return 0;
}

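/* Create a vkr_queue wrapping a driver VkQueue.  The object id stays 0 until
 * the guest names the queue via vkGetDeviceQueue(2); the sync thread is
 * spawned only with VKR_RENDERER_THREAD_SYNC.
 */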
struct vkr_queue *
vkr_queue_create(struct vkr_context *ctx,
                 struct vkr_device *dev,
                 VkDeviceQueueCreateFlags flags,
                 uint32_t family,
                 uint32_t index,
                 VkQueue handle)
{
   struct vkr_queue *queue;
   int ret;

   /* id is set to 0 until vkr_queue_assign_object_id */
   queue = vkr_object_alloc(sizeof(*queue), VK_OBJECT_TYPE_QUEUE, 0);
   if (!queue)
      return NULL;

   queue->base.handle.queue = handle;

   queue->context = ctx;
   queue->device = dev;
   queue->flags = flags;
   queue->family = family;
   queue->index = index;

   list_inithead(&queue->pending_syncs);
   list_inithead(&queue->signaled_syncs);

   ret = mtx_init(&queue->mutex, mtx_plain);
   if (ret != thrd_success) {
      free(queue);
      return NULL;
   }
   ret = cnd_init(&queue->cond);
   if (ret != thrd_success) {
      mtx_destroy(&queue->mutex);
      free(queue);
      return NULL;
   }

   if (vkr_renderer_flags & VKR_RENDERER_THREAD_SYNC) {
      ret = thrd_create(&queue->thread, vkr_queue_thread, queue);
      if (ret != thrd_success) {
         mtx_destroy(&queue->mutex);
         cnd_destroy(&queue->cond);
         free(queue);
         return NULL;
      }
      queue->eventfd = ctx->fence_eventfd;
   }

   list_inithead(&queue->busy_head);
   list_inithead(&queue->base.track_head);

   return queue;
}

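/* Bind the guest-chosen object id to a queue.  Repeated queries of the same
 * queue must supply the same id; a mismatch is a fatal decoder error.
 */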
static void
vkr_queue_assign_object_id(struct vkr_context *ctx,
                           struct vkr_queue *queue,
                           vkr_object_id id)
{
   if (queue->base.id) {
      if (queue->base.id != id)
         vkr_cs_decoder_set_fatal(&ctx->decoder);
      return;
   }

   if (!vkr_context_validate_object_id(ctx, id))
      return;

   queue->base.id = id;

   vkr_context_add_object(ctx, &queue->base);
}

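/* Look up the queue created at device-creation time that matches the
 * requested flags, family, and index.
 */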
static struct vkr_queue *
vkr_device_lookup_queue(struct vkr_device *dev,
                        VkDeviceQueueCreateFlags flags,
                        uint32_t family,
                        uint32_t index)
{
   struct vkr_queue *queue;

   LIST_FOR_EACH_ENTRY (queue, &dev->queues, base.track_head) {
      if (queue->flags == flags && queue->family == family && queue->index == index)
         return queue;
   }

   return NULL;
}

static void
vkr_dispatch_vkGetDeviceQueue(struct vn_dispatch_context *dispatch,
                              struct vn_command_vkGetDeviceQueue *args)
{
   struct vkr_context *ctx = dispatch->data;

   struct vkr_device *dev = vkr_device_from_handle(args->device);

   struct vkr_queue *queue = vkr_device_lookup_queue(
      dev, 0 /* flags */, args->queueFamilyIndex, args->queueIndex);
   if (!queue) {
      vkr_cs_decoder_set_fatal(&ctx->decoder);
      return;
   }

   const vkr_object_id id =
      vkr_cs_handle_load_id((const void **)args->pQueue, VK_OBJECT_TYPE_QUEUE);
   vkr_queue_assign_object_id(ctx, queue, id);
}

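/* Besides the queue lookup, vkGetDeviceQueue2 may chain a
 * VkDeviceQueueTimelineInfoMESA to bind the queue to a fence ring: ring_idx
 * 0 is disallowed, and each ring can be bound to at most one queue.
 */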
static void
vkr_dispatch_vkGetDeviceQueue2(struct vn_dispatch_context *dispatch,
                               struct vn_command_vkGetDeviceQueue2 *args)
{
   struct vkr_context *ctx = dispatch->data;

   struct vkr_device *dev = vkr_device_from_handle(args->device);

   struct vkr_queue *queue = vkr_device_lookup_queue(dev, args->pQueueInfo->flags,
                                                     args->pQueueInfo->queueFamilyIndex,
                                                     args->pQueueInfo->queueIndex);
   if (!queue) {
      vkr_cs_decoder_set_fatal(&ctx->decoder);
      return;
   }

   const VkDeviceQueueTimelineInfoMESA *timeline_info = vkr_find_struct(
      args->pQueueInfo->pNext, VK_STRUCTURE_TYPE_DEVICE_QUEUE_TIMELINE_INFO_MESA);
   if (timeline_info) {
      if (timeline_info->ringIdx == 0 ||
          timeline_info->ringIdx >= ARRAY_SIZE(ctx->sync_queues)) {
         vkr_log("invalid ring_idx %d", timeline_info->ringIdx);
         vkr_cs_decoder_set_fatal(&ctx->decoder);
         return;
      }

      if (ctx->sync_queues[timeline_info->ringIdx]) {
         vkr_log("sync_queue %d already bound", timeline_info->ringIdx);
         vkr_cs_decoder_set_fatal(&ctx->decoder);
         return;
      }

      queue->ring_idx = timeline_info->ringIdx;
      ctx->sync_queues[timeline_info->ringIdx] = queue;
   }

   const vkr_object_id id =
      vkr_cs_handle_load_id((const void **)args->pQueue, VK_OBJECT_TYPE_QUEUE);
   vkr_queue_assign_object_id(ctx, queue, id);
}

static void
vkr_dispatch_vkQueueSubmit(UNUSED struct vn_dispatch_context *dispatch,
                           struct vn_command_vkQueueSubmit *args)
{
   struct vkr_queue *queue = vkr_queue_from_handle(args->queue);
   struct vn_device_proc_table *vk = &queue->device->proc_table;

   vn_replace_vkQueueSubmit_args_handle(args);
   args->ret =
      vk->QueueSubmit(args->queue, args->submitCount, args->pSubmits, args->fence);
}

static void
vkr_dispatch_vkQueueBindSparse(UNUSED struct vn_dispatch_context *dispatch,
                               struct vn_command_vkQueueBindSparse *args)
{
   struct vkr_queue *queue = vkr_queue_from_handle(args->queue);
   struct vn_device_proc_table *vk = &queue->device->proc_table;

   vn_replace_vkQueueBindSparse_args_handle(args);
   args->ret =
      vk->QueueBindSparse(args->queue, args->bindInfoCount, args->pBindInfo, args->fence);
}

static void
vkr_dispatch_vkQueueWaitIdle(struct vn_dispatch_context *dispatch,
                             UNUSED struct vn_command_vkQueueWaitIdle *args)
{
   struct vkr_context *ctx = dispatch->data;
   /* no blocking call */
   vkr_cs_decoder_set_fatal(&ctx->decoder);
}

static void
vkr_dispatch_vkQueueSubmit2(UNUSED struct vn_dispatch_context *dispatch,
                            struct vn_command_vkQueueSubmit2 *args)
{
   struct vkr_queue *queue = vkr_queue_from_handle(args->queue);
   struct vn_device_proc_table *vk = &queue->device->proc_table;

   vn_replace_vkQueueSubmit2_args_handle(args);
   args->ret =
      vk->QueueSubmit2(args->queue, args->submitCount, args->pSubmits, args->fence);
}

static void
vkr_dispatch_vkCreateFence(struct vn_dispatch_context *dispatch,
                           struct vn_command_vkCreateFence *args)
{
   vkr_fence_create_and_add(dispatch->data, args);
}

static void
vkr_dispatch_vkDestroyFence(struct vn_dispatch_context *dispatch,
                            struct vn_command_vkDestroyFence *args)
{
   vkr_fence_destroy_and_remove(dispatch->data, args);
}

static void
vkr_dispatch_vkResetFences(UNUSED struct vn_dispatch_context *dispatch,
                           struct vn_command_vkResetFences *args)
{
   struct vkr_device *dev = vkr_device_from_handle(args->device);
   struct vn_device_proc_table *vk = &dev->proc_table;

   vn_replace_vkResetFences_args_handle(args);
   args->ret = vk->ResetFences(args->device, args->fenceCount, args->pFences);
}

static void
vkr_dispatch_vkGetFenceStatus(UNUSED struct vn_dispatch_context *dispatch,
                              struct vn_command_vkGetFenceStatus *args)
{
   struct vkr_device *dev = vkr_device_from_handle(args->device);
   struct vn_device_proc_table *vk = &dev->proc_table;

   vn_replace_vkGetFenceStatus_args_handle(args);
   args->ret = vk->GetFenceStatus(args->device, args->fence);
}

static void
vkr_dispatch_vkWaitForFences(struct vn_dispatch_context *dispatch,
                             struct vn_command_vkWaitForFences *args)
{
   struct vkr_context *ctx = dispatch->data;
   struct vkr_device *dev = vkr_device_from_handle(args->device);
   struct vn_device_proc_table *vk = &dev->proc_table;

   vn_replace_vkWaitForFences_args_handle(args);
   args->ret = vk->WaitForFences(args->device, args->fenceCount, args->pFences,
                                 args->waitAll, args->timeout);

   if (args->ret == VK_ERROR_DEVICE_LOST)
      vkr_cs_decoder_set_fatal(&ctx->decoder);
}

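/* Per the spec, exporting a fence payload as a sync_fd (copy transference)
 * has the same effect on the fence as a reset; the exported fd itself is not
 * needed and is closed immediately.
 */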
static void
vkr_dispatch_vkResetFenceResource100000MESA(
   struct vn_dispatch_context *dispatch,
   struct vn_command_vkResetFenceResource100000MESA *args)
{
   struct vkr_context *ctx = dispatch->data;
   struct vkr_device *dev = vkr_device_from_handle(args->device);
   struct vn_device_proc_table *vk = &dev->proc_table;
   int fd = -1;

   vn_replace_vkResetFenceResource100000MESA_args_handle(args);

   const VkFenceGetFdInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR,
      .fence = args->fence,
      .handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT,
   };
   VkResult result = vk->GetFenceFdKHR(args->device, &info, &fd);
   if (result != VK_SUCCESS) {
      vkr_cs_decoder_set_fatal(&ctx->decoder);
      return;
   }

   if (fd >= 0)
      close(fd);
}

static void
vkr_dispatch_vkCreateSemaphore(struct vn_dispatch_context *dispatch,
                               struct vn_command_vkCreateSemaphore *args)
{
   vkr_semaphore_create_and_add(dispatch->data, args);
}

static void
vkr_dispatch_vkDestroySemaphore(struct vn_dispatch_context *dispatch,
                                struct vn_command_vkDestroySemaphore *args)
{
   vkr_semaphore_destroy_and_remove(dispatch->data, args);
}

static void
vkr_dispatch_vkGetSemaphoreCounterValue(UNUSED struct vn_dispatch_context *dispatch,
                                        struct vn_command_vkGetSemaphoreCounterValue *args)
{
   struct vkr_device *dev = vkr_device_from_handle(args->device);
   struct vn_device_proc_table *vk = &dev->proc_table;

   vn_replace_vkGetSemaphoreCounterValue_args_handle(args);
   args->ret = vk->GetSemaphoreCounterValue(args->device, args->semaphore, args->pValue);
}

static void
vkr_dispatch_vkWaitSemaphores(struct vn_dispatch_context *dispatch,
                              struct vn_command_vkWaitSemaphores *args)
{
   struct vkr_context *ctx = dispatch->data;
   struct vkr_device *dev = vkr_device_from_handle(args->device);
   struct vn_device_proc_table *vk = &dev->proc_table;

   vn_replace_vkWaitSemaphores_args_handle(args);
   args->ret = vk->WaitSemaphores(args->device, args->pWaitInfo, args->timeout);

   if (args->ret == VK_ERROR_DEVICE_LOST)
      vkr_cs_decoder_set_fatal(&ctx->decoder);
}

static void
vkr_dispatch_vkSignalSemaphore(UNUSED struct vn_dispatch_context *dispatch,
                               struct vn_command_vkSignalSemaphore *args)
{
   struct vkr_device *dev = vkr_device_from_handle(args->device);
   struct vn_device_proc_table *vk = &dev->proc_table;

   vn_replace_vkSignalSemaphore_args_handle(args);
   args->ret = vk->SignalSemaphore(args->device, args->pSignalInfo);
}

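/* Exporting a sync_fd from the semaphore has the same side effect on its
 * payload as a wait operation, which is all this command needs; the fd is
 * unused and closed right away.
 */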
static void
vkr_dispatch_vkWaitSemaphoreResource100000MESA(
   struct vn_dispatch_context *dispatch,
   struct vn_command_vkWaitSemaphoreResource100000MESA *args)
{
   struct vkr_context *ctx = dispatch->data;
   struct vkr_device *dev = vkr_device_from_handle(args->device);
   struct vn_device_proc_table *vk = &dev->proc_table;
   int fd = -1;

   vn_replace_vkWaitSemaphoreResource100000MESA_args_handle(args);

   const VkSemaphoreGetFdInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR,
      .semaphore = args->semaphore,
      .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
   };
   VkResult result = vk->GetSemaphoreFdKHR(args->device, &info, &fd);
   if (result != VK_SUCCESS) {
      vkr_cs_decoder_set_fatal(&ctx->decoder);
      return;
   }

   if (fd >= 0)
      close(fd);
}

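/* Import a signaled payload into the semaphore.  Passing fd -1 with
 * VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT is defined by the spec to
 * behave like importing an already-signaled sync file, and the temporary
 * import is consumed by the next wait.
 */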
static void
vkr_dispatch_vkImportSemaphoreResource100000MESA(
   struct vn_dispatch_context *dispatch,
   struct vn_command_vkImportSemaphoreResource100000MESA *args)
{
   struct vkr_context *ctx = dispatch->data;
   struct vkr_device *dev = vkr_device_from_handle(args->device);
   struct vn_device_proc_table *vk = &dev->proc_table;

   vn_replace_vkImportSemaphoreResource100000MESA_args_handle(args);

   const VkImportSemaphoreResourceInfo100000MESA *res_info =
      args->pImportSemaphoreResourceInfo;

   /* resourceId 0 is for importing a signaled payload to the sync_fd semaphore */
   assert(!res_info->resourceId);

   const VkImportSemaphoreFdInfoKHR import_info = {
      .sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR,
      .semaphore = res_info->semaphore,
      .flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT,
      .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
      .fd = -1,
   };
   if (vk->ImportSemaphoreFdKHR(args->device, &import_info) != VK_SUCCESS)
      vkr_cs_decoder_set_fatal(&ctx->decoder);
}

static void
vkr_dispatch_vkCreateEvent(struct vn_dispatch_context *dispatch,
                           struct vn_command_vkCreateEvent *args)
{
   vkr_event_create_and_add(dispatch->data, args);
}

static void
vkr_dispatch_vkDestroyEvent(struct vn_dispatch_context *dispatch,
                            struct vn_command_vkDestroyEvent *args)
{
   vkr_event_destroy_and_remove(dispatch->data, args);
}

static void
vkr_dispatch_vkGetEventStatus(UNUSED struct vn_dispatch_context *dispatch,
                              struct vn_command_vkGetEventStatus *args)
{
   struct vkr_device *dev = vkr_device_from_handle(args->device);
   struct vn_device_proc_table *vk = &dev->proc_table;

   vn_replace_vkGetEventStatus_args_handle(args);
   args->ret = vk->GetEventStatus(args->device, args->event);
}

static void
vkr_dispatch_vkSetEvent(UNUSED struct vn_dispatch_context *dispatch,
                        struct vn_command_vkSetEvent *args)
{
   struct vkr_device *dev = vkr_device_from_handle(args->device);
   struct vn_device_proc_table *vk = &dev->proc_table;

   vn_replace_vkSetEvent_args_handle(args);
   args->ret = vk->SetEvent(args->device, args->event);
}

static void
vkr_dispatch_vkResetEvent(UNUSED struct vn_dispatch_context *dispatch,
                          struct vn_command_vkResetEvent *args)
{
   struct vkr_device *dev = vkr_device_from_handle(args->device);
   struct vn_device_proc_table *vk = &dev->proc_table;

   vn_replace_vkResetEvent_args_handle(args);
   args->ret = vk->ResetEvent(args->device, args->event);
}

void
vkr_context_init_queue_dispatch(struct vkr_context *ctx)
{
   struct vn_dispatch_context *dispatch = &ctx->dispatch;

   dispatch->dispatch_vkGetDeviceQueue = vkr_dispatch_vkGetDeviceQueue;
   dispatch->dispatch_vkGetDeviceQueue2 = vkr_dispatch_vkGetDeviceQueue2;
   dispatch->dispatch_vkQueueSubmit = vkr_dispatch_vkQueueSubmit;
   dispatch->dispatch_vkQueueBindSparse = vkr_dispatch_vkQueueBindSparse;
   dispatch->dispatch_vkQueueWaitIdle = vkr_dispatch_vkQueueWaitIdle;

   /* VK_KHR_synchronization2 */
   dispatch->dispatch_vkQueueSubmit2 = vkr_dispatch_vkQueueSubmit2;
}

void
vkr_context_init_fence_dispatch(struct vkr_context *ctx)
{
   struct vn_dispatch_context *dispatch = &ctx->dispatch;

   dispatch->dispatch_vkCreateFence = vkr_dispatch_vkCreateFence;
   dispatch->dispatch_vkDestroyFence = vkr_dispatch_vkDestroyFence;
   dispatch->dispatch_vkResetFences = vkr_dispatch_vkResetFences;
   dispatch->dispatch_vkGetFenceStatus = vkr_dispatch_vkGetFenceStatus;
   dispatch->dispatch_vkWaitForFences = vkr_dispatch_vkWaitForFences;

   dispatch->dispatch_vkResetFenceResource100000MESA =
      vkr_dispatch_vkResetFenceResource100000MESA;
}

void
vkr_context_init_semaphore_dispatch(struct vkr_context *ctx)
{
   struct vn_dispatch_context *dispatch = &ctx->dispatch;

   dispatch->dispatch_vkCreateSemaphore = vkr_dispatch_vkCreateSemaphore;
   dispatch->dispatch_vkDestroySemaphore = vkr_dispatch_vkDestroySemaphore;
   dispatch->dispatch_vkGetSemaphoreCounterValue =
      vkr_dispatch_vkGetSemaphoreCounterValue;
   dispatch->dispatch_vkWaitSemaphores = vkr_dispatch_vkWaitSemaphores;
   dispatch->dispatch_vkSignalSemaphore = vkr_dispatch_vkSignalSemaphore;

   dispatch->dispatch_vkWaitSemaphoreResource100000MESA =
      vkr_dispatch_vkWaitSemaphoreResource100000MESA;
   dispatch->dispatch_vkImportSemaphoreResource100000MESA =
      vkr_dispatch_vkImportSemaphoreResource100000MESA;
}

void
vkr_context_init_event_dispatch(struct vkr_context *ctx)
{
   struct vn_dispatch_context *dispatch = &ctx->dispatch;

   dispatch->dispatch_vkCreateEvent = vkr_dispatch_vkCreateEvent;
   dispatch->dispatch_vkDestroyEvent = vkr_dispatch_vkDestroyEvent;
   dispatch->dispatch_vkGetEventStatus = vkr_dispatch_vkGetEventStatus;
   dispatch->dispatch_vkSetEvent = vkr_dispatch_vkSetEvent;
   dispatch->dispatch_vkResetEvent = vkr_dispatch_vkResetEvent;
}