/*
 * Copyright © 2022 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "lvp_private.h"
#include "util/timespec.h"
#include "vk_common_entrypoints.h"

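/* Sanity check: a sync that is already signaled must not still hold a
 * pipe fence.
 */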
static void
lvp_pipe_sync_validate(ASSERTED struct lvp_pipe_sync *sync)
{
   if (sync->signaled)
      assert(sync->fence == NULL);
}

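/* Initialize the lock and condition variable.  A non-zero initial value
 * starts the sync in the signaled state.
 */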
static VkResult
lvp_pipe_sync_init(UNUSED struct vk_device *vk_device,
                   struct vk_sync *vk_sync,
                   uint64_t initial_value)
{
   struct lvp_pipe_sync *sync = vk_sync_as_lvp_pipe_sync(vk_sync);

   mtx_init(&sync->lock, mtx_plain);
   cnd_init(&sync->changed);
   sync->signaled = (initial_value != 0);
   sync->fence = NULL;

   return VK_SUCCESS;
}

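/* Drop any pending fence reference and tear down the lock and condition
 * variable.
 */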
static void
lvp_pipe_sync_finish(struct vk_device *vk_device,
                     struct vk_sync *vk_sync)
{
   struct lvp_device *device = container_of(vk_device, struct lvp_device, vk);
   struct lvp_pipe_sync *sync = vk_sync_as_lvp_pipe_sync(vk_sync);

   lvp_pipe_sync_validate(sync);
   if (sync->fence)
      device->pscreen->fence_reference(device->pscreen, &sync->fence, NULL);
   cnd_destroy(&sync->changed);
   mtx_destroy(&sync->lock);
}

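/* Attach a pipe fence to the sync and wake any waiters.  With a NULL fence
 * the sync is considered signaled immediately; otherwise it becomes pending
 * on the fence.
 */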
void
lvp_pipe_sync_signal_with_fence(struct lvp_device *device,
                                struct lvp_pipe_sync *sync,
                                struct pipe_fence_handle *fence)
{
   mtx_lock(&sync->lock);
   lvp_pipe_sync_validate(sync);
   sync->signaled = fence == NULL;
   device->pscreen->fence_reference(device->pscreen, &sync->fence, fence);
   cnd_broadcast(&sync->changed);
   mtx_unlock(&sync->lock);
}

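/* CPU signal: mark the sync signaled, drop any pending fence, and wake
 * waiters.
 */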
static VkResult
lvp_pipe_sync_signal(struct vk_device *vk_device,
                     struct vk_sync *vk_sync,
                     uint64_t value)
{
   struct lvp_device *device = container_of(vk_device, struct lvp_device, vk);
   struct lvp_pipe_sync *sync = vk_sync_as_lvp_pipe_sync(vk_sync);

   mtx_lock(&sync->lock);
   lvp_pipe_sync_validate(sync);
   sync->signaled = true;
   if (sync->fence)
      device->pscreen->fence_reference(device->pscreen, &sync->fence, NULL);
   cnd_broadcast(&sync->changed);
   mtx_unlock(&sync->lock);

   return VK_SUCCESS;
}

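/* Return the sync to the unsignaled state, dropping any pending fence. */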
static VkResult
lvp_pipe_sync_reset(struct vk_device *vk_device,
                    struct vk_sync *vk_sync)
{
   struct lvp_device *device = container_of(vk_device, struct lvp_device, vk);
   struct lvp_pipe_sync *sync = vk_sync_as_lvp_pipe_sync(vk_sync);

   mtx_lock(&sync->lock);
   lvp_pipe_sync_validate(sync);
   sync->signaled = false;
   if (sync->fence)
      device->pscreen->fence_reference(device->pscreen, &sync->fence, NULL);
   cnd_broadcast(&sync->changed);
   mtx_unlock(&sync->lock);

   return VK_SUCCESS;
}

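/* Transfer the fence and signaled state from src to dst, leaving src
 * unsignaled.  The destination's old fence reference, if any, is dropped.
 */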
static VkResult
lvp_pipe_sync_move(struct vk_device *vk_device,
                   struct vk_sync *vk_dst,
                   struct vk_sync *vk_src)
{
   struct lvp_device *device = container_of(vk_device, struct lvp_device, vk);
   struct lvp_pipe_sync *dst = vk_sync_as_lvp_pipe_sync(vk_dst);
   struct lvp_pipe_sync *src = vk_sync_as_lvp_pipe_sync(vk_src);

   /* Pull the fence out of the source */
   mtx_lock(&src->lock);
   struct pipe_fence_handle *fence = src->fence;
   bool signaled = src->signaled;
   src->fence = NULL;
   src->signaled = false;
   cnd_broadcast(&src->changed);
   mtx_unlock(&src->lock);

   mtx_lock(&dst->lock);
   if (dst->fence)
      device->pscreen->fence_reference(device->pscreen, &dst->fence, NULL);
   dst->fence = fence;
   dst->signaled = signaled;
   cnd_broadcast(&dst->changed);
   mtx_unlock(&dst->lock);

   return VK_SUCCESS;
}

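/* Waits with sync->lock held.  First blocks on the condition variable until
 * the sync is either signaled or has a fence attached, then (unless only
 * waiting for the pending state) drops the lock and waits on the fence
 * itself.
 */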
static VkResult
lvp_pipe_sync_wait_locked(struct lvp_device *device,
                          struct lvp_pipe_sync *sync,
                          uint64_t wait_value,
                          enum vk_sync_wait_flags wait_flags,
                          uint64_t abs_timeout_ns)
{
   assert(!(wait_flags & VK_SYNC_WAIT_ANY));

   lvp_pipe_sync_validate(sync);

   uint64_t now_ns = os_time_get_nano();
   while (!sync->signaled && !sync->fence) {
      if (now_ns >= abs_timeout_ns)
         return VK_TIMEOUT;

      int ret;
      if (abs_timeout_ns >= INT64_MAX) {
         /* Common infinite wait case */
         ret = cnd_wait(&sync->changed, &sync->lock);
      } else {
         /* This is really annoying.  The C11 threads API uses CLOCK_REALTIME
          * while all our absolute timeouts are in CLOCK_MONOTONIC.  Best
          * thing we can do is to convert and hope the system admin doesn't
          * change the time out from under us.
          */
         uint64_t rel_timeout_ns = abs_timeout_ns - now_ns;

         struct timespec now_ts, abs_timeout_ts;
         timespec_get(&now_ts, TIME_UTC);
         if (timespec_add_nsec(&abs_timeout_ts, &now_ts, rel_timeout_ns)) {
            /* Overflowed; may as well be infinite */
            ret = cnd_wait(&sync->changed, &sync->lock);
         } else {
            ret = cnd_timedwait(&sync->changed, &sync->lock, &abs_timeout_ts);
         }
      }
      if (ret == thrd_error)
         return vk_errorf(device, VK_ERROR_UNKNOWN, "cnd_timedwait failed");

      lvp_pipe_sync_validate(sync);

      /* We don't trust the timeout condition on cnd_timedwait() because of
       * the potential clock issues caused by using CLOCK_REALTIME.  Instead,
       * update now_ns, go back to the top of the loop, and re-check.
       */
      now_ns = os_time_get_nano();
   }

   if (sync->signaled || (wait_flags & VK_SYNC_WAIT_PENDING))
      return VK_SUCCESS;

   /* Grab a reference before we drop the lock */
   struct pipe_fence_handle *fence = NULL;
   device->pscreen->fence_reference(device->pscreen, &fence, sync->fence);

   mtx_unlock(&sync->lock);

   uint64_t rel_timeout_ns =
      now_ns >= abs_timeout_ns ? 0 : abs_timeout_ns - now_ns;
   bool complete = device->pscreen->fence_finish(device->pscreen, NULL,
                                                 fence, rel_timeout_ns);

   device->pscreen->fence_reference(device->pscreen, &fence, NULL);

   mtx_lock(&sync->lock);

   lvp_pipe_sync_validate(sync);

   if (!complete)
      return VK_TIMEOUT;

   if (sync->fence == fence) {
      device->pscreen->fence_reference(device->pscreen, &sync->fence, NULL);
      sync->signaled = true;
   }

   return VK_SUCCESS;
}

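/* Thin wrapper that takes sync->lock around lvp_pipe_sync_wait_locked(). */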
static VkResult
lvp_pipe_sync_wait(struct vk_device *vk_device,
                   struct vk_sync *vk_sync,
                   uint64_t wait_value,
                   enum vk_sync_wait_flags wait_flags,
                   uint64_t abs_timeout_ns)
{
   struct lvp_device *device = container_of(vk_device, struct lvp_device, vk);
   struct lvp_pipe_sync *sync = vk_sync_as_lvp_pipe_sync(vk_sync);

   mtx_lock(&sync->lock);

   VkResult result = lvp_pipe_sync_wait_locked(device, sync, wait_value,
                                               wait_flags, abs_timeout_ns);

   mtx_unlock(&sync->lock);

   return result;
}

#ifdef HAVE_LIBDRM
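/* Wrap the imported sync file in a pipe fence and attach it to the sync. */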
static VkResult
lvp_pipe_import_sync_file(struct vk_device *vk_device,
                          struct vk_sync *vk_sync,
                          int sync_file)
{
   struct lvp_device *device = container_of(vk_device, struct lvp_device, vk);
   struct lvp_pipe_sync *sync = vk_sync_as_lvp_pipe_sync(vk_sync);

   struct pipe_fence_handle *fence;
   device->queue.ctx->create_fence_fd(
         device->queue.ctx, &fence, sync_file, PIPE_FD_TYPE_NATIVE_SYNC);

   if (fence == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   lvp_pipe_sync_signal_with_fence(device, sync, fence);
   device->pscreen->fence_reference(device->pscreen, &fence, NULL);

   return VK_SUCCESS;
}

static VkResult
lvp_pipe_export_sync_file(struct vk_device *vk_device,
                          struct vk_sync *vk_sync,
                          int *sync_file)
{
   struct lvp_pipe_sync *sync = vk_sync_as_lvp_pipe_sync(vk_sync);

   /* It's not ideal, but since we cannot properly support sync files
    * from userspace, what we will do instead is wait for lavapipe to
    * finish rendering, so that we can safely export a sync file that
    * has already been signalled.
    */
   vk_common_DeviceWaitIdle(vk_device_to_handle(vk_device));
   struct lvp_device *device = container_of(vk_device, struct lvp_device, vk);
   *sync_file = device->pscreen->fence_get_fd(device->pscreen, sync->fence);

   return *sync_file != -1 ? VK_SUCCESS : VK_ERROR_OUT_OF_HOST_MEMORY;
}
#endif

const struct vk_sync_type lvp_pipe_sync_type = {
   .size = sizeof(struct lvp_pipe_sync),
   .features = VK_SYNC_FEATURE_BINARY |
               VK_SYNC_FEATURE_GPU_WAIT |
               VK_SYNC_FEATURE_GPU_MULTI_WAIT |
               VK_SYNC_FEATURE_CPU_WAIT |
               VK_SYNC_FEATURE_CPU_RESET |
               VK_SYNC_FEATURE_CPU_SIGNAL |
               VK_SYNC_FEATURE_WAIT_PENDING,
   .init = lvp_pipe_sync_init,
   .finish = lvp_pipe_sync_finish,
   .signal = lvp_pipe_sync_signal,
   .reset = lvp_pipe_sync_reset,
   .move = lvp_pipe_sync_move,
   .wait = lvp_pipe_sync_wait,
#ifdef HAVE_LIBDRM
   .import_sync_file = lvp_pipe_import_sync_file,
   .export_sync_file = lvp_pipe_export_sync_file,
#endif
};