/*
 * Copyright © 2012 Rob Clark <[email protected]>
 * SPDX-License-Identifier: MIT
 *
 * Authors:
 *    Rob Clark <[email protected]>
 */

#include "util/os_file.h"
#include "util/u_inlines.h"

#include "freedreno_batch.h"
#include "freedreno_context.h"
#include "freedreno_fence.h"
#include "freedreno_util.h"
/* TODO: Use the interface drm/freedreno_drmif.h instead of calling directly */
#include <xf86drm.h>

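/* Flush any deferred work associated with the fence: flush the TC token (if
 * any), wait on the fence's ready-fence according to 'timeout', flush the
 * batch the fence is tracking, and finally flush the submit fence.  Returns
 * false if the flush could not complete within the given timeout.
 */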
static bool
fence_flush(struct pipe_context *pctx, struct pipe_fence_handle *fence,
            uint64_t timeout)
   /* NOTE: in the !fence_is_signalled() case we may be called from non-driver
    * thread, but we don't call fd_batch_flush() in that case
    */
   in_dt
{
   if (fence->flushed)
      return true;

   MESA_TRACE_FUNC();

   if (!util_queue_fence_is_signalled(&fence->ready)) {
      if (fence->tc_token) {
         threaded_context_flush(pctx, fence->tc_token, timeout == 0);
      }

      if (!timeout)
         return false;

      if (timeout == OS_TIMEOUT_INFINITE) {
         util_queue_fence_wait(&fence->ready);
      } else {
         int64_t abs_timeout = os_time_get_absolute_timeout(timeout);
         if (!util_queue_fence_wait_timeout(&fence->ready, abs_timeout)) {
            return false;
         }
      }

      goto out;
   }

   if (fence->batch)
      fd_batch_flush(fence->batch);

out:
   if (fence->fence)
      fd_fence_flush(fence->fence);

   assert(!fence->batch);
   fence->flushed = true;
   return true;
}

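/* Re-populate a deferred fence from 'last_fence' when it ends up with nothing
 * of its own to flush: take a reference to last_fence (recursing through its
 * own last_fence chain) and drop the batch reference so the fence can be
 * signalled.
 */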
void
fd_pipe_fence_repopulate(struct pipe_fence_handle *fence,
                         struct pipe_fence_handle *last_fence)
{
   if (last_fence->last_fence)
      fd_pipe_fence_repopulate(fence, last_fence->last_fence);

   /* The fence we are re-populating must not be an fd-fence (but last_fence
    * might have been)
    */
   assert(!fence->use_fence_fd);
   assert(!last_fence->batch);

   fd_pipe_fence_ref(&fence->last_fence, last_fence);

   /* We have nothing to flush, so nothing will clear the batch reference
    * (which is normally done when the batch is flushed), so do it now:
    */
   fd_pipe_fence_set_batch(fence, NULL);
}

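/* Final unref: drop the last_fence and TC token references, destroy the
 * syncobj (if any), and release the pipe and submit fence.
 */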
static void
fd_fence_destroy(struct pipe_fence_handle *fence)
{
   fd_pipe_fence_ref(&fence->last_fence, NULL);

   tc_unflushed_batch_token_reference(&fence->tc_token, NULL);

   if (fence->syncobj)
      drmSyncobjDestroy(fd_device_fd(fence->screen->dev), fence->syncobj);
   fd_pipe_del(fence->pipe);
   if (fence->fence)
      fd_fence_del(fence->fence);

   FREE(fence);
}

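/* Reference counting for pipe_fence_handle; destroys the old fence when its
 * last reference is dropped.
 */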
void
fd_pipe_fence_ref(struct pipe_fence_handle **ptr,
                  struct pipe_fence_handle *pfence)
{
   if (pipe_reference(&(*ptr)->reference, &pfence->reference))
      fd_fence_destroy(*ptr);

   *ptr = pfence;
}

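/* Flush (if needed) and then wait on the fence, returning true if it
 * signalled within the timeout.  Handles TC deferred fences (via last_fence),
 * fence-fd fences, and plain kernel fences.
 */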
bool
fd_pipe_fence_finish(struct pipe_screen *pscreen, struct pipe_context *pctx,
                     struct pipe_fence_handle *fence, uint64_t timeout)
{
   MESA_TRACE_SCOPE(timeout ? "fd_pipe_fence_finish(wait)" : "fd_pipe_fence_finish(nowait)");

   /* Note: for TC deferred fence, pctx->flush() may not have been called
    * yet, so always do fence_flush() *first* before delegating to
    * fence->last_fence
    */
   if (!fence_flush(pctx, fence, timeout))
      return false;

   if (fence->last_fence)
      return fd_pipe_fence_finish(pscreen, pctx, fence->last_fence, timeout);

   if (fence->use_fence_fd) {
      assert(fence->fence);
      int ret = sync_wait(fence->fence->fence_fd, timeout / 1000000);
      return ret == 0;
   }

   if (fd_pipe_wait_timeout(fence->pipe, fence->fence, timeout))
      return false;

   return true;
}

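/* Common fence constructor: allocate the fence, take references to the batch
 * (if any) and pipe, and wrap an imported fence-fd or syncobj handle when one
 * is provided.
 */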
static struct pipe_fence_handle *
fence_create(struct fd_context *ctx, struct fd_batch *batch, int fence_fd,
             int syncobj)
{
   struct pipe_fence_handle *fence;

   fence = CALLOC_STRUCT(pipe_fence_handle);
   if (!fence)
      return NULL;

   pipe_reference_init(&fence->reference, 1);
   util_queue_fence_init(&fence->ready);

   fence->ctx = ctx;
   fd_pipe_fence_set_batch(fence, batch);
   fence->pipe = fd_pipe_ref(ctx->pipe);
   fence->screen = ctx->screen;
   fence->use_fence_fd = (fence_fd != -1);
   fence->syncobj = syncobj;

   if (fence_fd != -1) {
      fence->fence = fd_fence_new(fence->pipe, fence->use_fence_fd);
      fence->fence->fence_fd = fence_fd;
   }

   return fence;
}

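/* Import an external fence, either a native sync-file fd (dup'd and wrapped
 * in a new submit fence) or a drm syncobj (converted to a syncobj handle).
 */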
void
fd_create_pipe_fence_fd(struct pipe_context *pctx, struct pipe_fence_handle **pfence,
                        int fd, enum pipe_fd_type type)
{
   struct fd_context *ctx = fd_context(pctx);

   switch (type) {
   case PIPE_FD_TYPE_NATIVE_SYNC:
      *pfence =
         fence_create(fd_context(pctx), NULL, os_dupfd_cloexec(fd), 0);
      break;
   case PIPE_FD_TYPE_SYNCOBJ: {
      int ret;
      uint32_t syncobj;

      assert(ctx->screen->has_syncobj);
      ret = drmSyncobjFDToHandle(fd_device_fd(ctx->screen->dev), fd, &syncobj);
      if (!ret)
         close(fd);

      *pfence = fence_create(fd_context(pctx), NULL, -1, syncobj);
      break;
   }
   default:
      unreachable("Unhandled fence type");
   }
}

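/* Make subsequent GPU work on this context wait for the fence.  For external
 * (fence-fd) fences the fd is accumulated into the context's in-fence and
 * implicit sync is disabled; for other fences there is nothing more to do
 * (without preemption).
 */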
void
fd_pipe_fence_server_sync(struct pipe_context *pctx, struct pipe_fence_handle *fence)
{
   struct fd_context *ctx = fd_context(pctx);

   MESA_TRACE_FUNC();

   /* NOTE: we don't expect the combination of fence-fd + async-flush-fence,
    * so timeout==0 is ok here:
    */
   fence_flush(pctx, fence, 0);

   if (fence->last_fence) {
      fd_pipe_fence_server_sync(pctx, fence->last_fence);
      return;
   }

   /* if not an external fence, then nothing more to do without preemption: */
   if (!fence->use_fence_fd)
      return;

   ctx->no_implicit_sync = true;

   assert(fence->fence);
   if (sync_accumulate("freedreno", &ctx->in_fence_fd, fence->fence->fence_fd)) {
      /* error */
   }
}

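/* Signal the fence's syncobj, if it has one (imported syncobj fences). */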
void
fd_pipe_fence_server_signal(struct pipe_context *pctx,
                            struct pipe_fence_handle *fence)
{
   struct fd_context *ctx = fd_context(pctx);

   if (fence->syncobj) {
      drmSyncobjSignal(fd_device_fd(ctx->screen->dev), &fence->syncobj, 1);
   }
}

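/* Export the fence as a sync-file fd, flushing first so that the submit fence
 * (and its fd) exists.  Only valid for fences created with use_fence_fd.
 */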
int
fd_pipe_fence_get_fd(struct pipe_screen *pscreen, struct pipe_fence_handle *fence)
{
   MESA_TRACE_FUNC();

   /* We don't expect deferred flush to be combined with fence-fd: */
   assert(!fence->last_fence);

   assert(fence->use_fence_fd);

   /* NOTE: in the deferred fence case, the pctx we want is the threaded-ctx
    * but if TC is not used, this will be null. Which is fine, we won't call
    * threaded_context_flush() in that case
    */
   fence_flush(&fence->ctx->tc->base, fence, OS_TIMEOUT_INFINITE);
   assert(fence->fence);
   return os_dupfd_cloexec(fence->fence->fence_fd);
}

bool
fd_pipe_fence_is_fd(struct pipe_fence_handle *fence)
{
   return fence->use_fence_fd;
}

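/* Create a fence associated with the given batch; the batch is marked as
 * needing flush.
 */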
struct pipe_fence_handle *
fd_pipe_fence_create(struct fd_batch *batch)
{
   return fence_create(batch->ctx, batch, -1, 0);
}

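/* Associate (or, with batch==NULL, dis-associate) the fence with a batch.
 * Dis-associating signals the fence's ready-fence so TC knows the fence has
 * been flushed.
 */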
void
fd_pipe_fence_set_batch(struct pipe_fence_handle *fence, struct fd_batch *batch)
{
   if (batch) {
      assert(!fence->batch);
      fd_batch_reference(&fence->batch, batch);
      fd_batch_needs_flush(batch);
   } else {
      fd_batch_reference(&fence->batch, NULL);

      /* When the batch is dis-associated with the fence, we can signal TC
       * that the fence is flushed
       */
      if (fence->needs_signal) {
         util_queue_fence_signal(&fence->ready);
         fence->needs_signal = false;
      }
   }
}

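/* Attach the kernel submit fence produced by flushing the batch, and drop the
 * batch reference now that the fence no longer needs it.
 */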
void
fd_pipe_fence_set_submit_fence(struct pipe_fence_handle *fence,
                               struct fd_fence *submit_fence)
{
   /* Take ownership of the drm fence after batch/submit is flushed: */
   assert(!fence->fence);
   fence->fence = submit_fence;
   fd_pipe_fence_set_batch(fence, NULL);
}

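/* Create a deferred fence for TC: the fence starts out unsignalled and holds
 * the TC token so that a later wait can trigger the deferred flush.
 */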
struct pipe_fence_handle *
fd_pipe_fence_create_unflushed(struct pipe_context *pctx,
                               struct tc_unflushed_batch_token *tc_token)
{
   struct pipe_fence_handle *fence =
      fence_create(fd_context(pctx), NULL, -1, 0);
   fence->needs_signal = true;
   util_queue_fence_reset(&fence->ready);
   tc_unflushed_batch_token_reference(&fence->tc_token, tc_token);
   return fence;
}