/*
 * Copyright © 2021 Collabora Ltd.
 * SPDX-License-Identifier: MIT
 */

#include <assert.h>
#include <errno.h>

#include <xf86drm.h>

#include "panvk_device.h"
#include "panvk_entrypoints.h"
#include "panvk_event.h"

#include "vk_log.h"

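/* panvk events are backed by a DRM syncobj: the event counts as set when a
 * signaled fence is attached to the syncobj, and as reset when the syncobj
 * carries no signaled fence.
 */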
VKAPI_ATTR VkResult VKAPI_CALL
panvk_per_arch(CreateEvent)(VkDevice _device,
                            const VkEventCreateInfo *pCreateInfo,
                            const VkAllocationCallbacks *pAllocator,
                            VkEvent *pEvent)
{
   VK_FROM_HANDLE(panvk_device, device, _device);
   struct panvk_event *event = vk_object_zalloc(
      &device->vk, pAllocator, sizeof(*event), VK_OBJECT_TYPE_EVENT);
   if (!event)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   struct drm_syncobj_create create = {
      .flags = 0,
   };

   int ret = drmIoctl(device->vk.drm_fd, DRM_IOCTL_SYNCOBJ_CREATE, &create);
   if (ret) {
      /* Don't leak the event object if syncobj creation fails. */
      vk_object_free(&device->vk, pAllocator, event);
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   event->syncobj = create.handle;
   *pEvent = panvk_event_to_handle(event);

   return VK_SUCCESS;
}

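/* Destroying an event releases the kernel syncobj before freeing the
 * host-side object.
 */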
VKAPI_ATTR void VKAPI_CALL
panvk_per_arch(DestroyEvent)(VkDevice _device, VkEvent _event,
                             const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(panvk_device, device, _device);
   VK_FROM_HANDLE(panvk_event, event, _event);

   if (!event)
      return;

   struct drm_syncobj_destroy destroy = {.handle = event->syncobj};
   drmIoctl(device->vk.drm_fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);

   vk_object_free(&device->vk, pAllocator, event);
}

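/* The event status is queried with a zero-timeout syncobj wait: ETIME means
 * the syncobj has not been signaled yet (event reset), success means it has
 * (event set).
 */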
VKAPI_ATTR VkResult VKAPI_CALL
panvk_per_arch(GetEventStatus)(VkDevice _device, VkEvent _event)
{
   VK_FROM_HANDLE(panvk_device, device, _device);
   VK_FROM_HANDLE(panvk_event, event, _event);
   bool signaled;

   struct drm_syncobj_wait wait = {
      .handles = (uintptr_t)&event->syncobj,
      .count_handles = 1,
      .timeout_nsec = 0,
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
   };

   int ret = drmIoctl(device->vk.drm_fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
   if (ret) {
      if (errno == ETIME) {
         signaled = false;
      } else {
         assert(0);
         return VK_ERROR_DEVICE_LOST; /* TODO */
      }
   } else {
      signaled = true;
   }

   return signaled ? VK_EVENT_SET : VK_EVENT_RESET;
}

VKAPI_ATTR VkResult VKAPI_CALL
panvk_per_arch(SetEvent)(VkDevice _device, VkEvent _event)
{
   VK_FROM_HANDLE(panvk_device, device, _device);
   VK_FROM_HANDLE(panvk_event, event, _event);

   struct drm_syncobj_array objs = {
      .handles = (uint64_t)(uintptr_t)&event->syncobj,
      .count_handles = 1,
   };

   /* This simply replaces the fence for this syncobj with one that is
    * already in the signaled state. That is fine because the spec mandates
    * that the event has been set before the vkCmdWaitEvents command
    * executes.
    * https://www.khronos.org/registry/vulkan/specs/1.2/html/chap6.html#commandbuffers-submission-progress
    */
   if (drmIoctl(device->vk.drm_fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &objs))
      return VK_ERROR_DEVICE_LOST;

   return VK_SUCCESS;
}

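/* Resetting an event removes the fence from its syncobj, which puts the
 * event back in the unsignaled (reset) state.
 */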
VKAPI_ATTR VkResult VKAPI_CALL
panvk_per_arch(ResetEvent)(VkDevice _device, VkEvent _event)
{
   VK_FROM_HANDLE(panvk_device, device, _device);
   VK_FROM_HANDLE(panvk_event, event, _event);

   struct drm_syncobj_array objs = {
      .handles = (uint64_t)(uintptr_t)&event->syncobj,
      .count_handles = 1,
   };

   if (drmIoctl(device->vk.drm_fd, DRM_IOCTL_SYNCOBJ_RESET, &objs))
      return VK_ERROR_DEVICE_LOST;

   return VK_SUCCESS;
}