/*
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "nouveau_screen.h"
#include "nouveau_context.h"
#include "nouveau_winsys.h"
#include "nouveau_fence.h"
#include "util/os_time.h"

#if DETECT_OS_POSIX
#include <sched.h>
#endif

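/* Functions prefixed with an underscore expect screen->fence.lock to be held
 * by the caller; the unprefixed wrappers below take the lock themselves.
 */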
static bool
_nouveau_fence_wait(struct nouveau_fence *fence, struct util_debug_callback *debug);

bool
nouveau_fence_new(struct nouveau_context *nv, struct nouveau_fence **fence)
{
   *fence = CALLOC_STRUCT(nouveau_fence);
   if (!*fence)
      return false;

   int ret = nouveau_bo_new(nv->screen->device, NOUVEAU_BO_GART, 0x1000, 0x1000, NULL, &(*fence)->bo);
   if (ret) {
      FREE(*fence);
      return false;
   }

   (*fence)->screen = nv->screen;
   (*fence)->context = nv;
   (*fence)->ref = 1;
   list_inithead(&(*fence)->work);

   return true;
}

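/* Run and free all work callbacks attached to the fence. The fence list lock
 * must be held.
 */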
static void
nouveau_fence_trigger_work(struct nouveau_fence *fence)
{
   simple_mtx_assert_locked(&fence->screen->fence.lock);

   struct nouveau_fence_work *work, *tmp;

   LIST_FOR_EACH_ENTRY_SAFE(work, tmp, &fence->work, list) {
      work->func(work->data);
      list_del(&work->list);
      FREE(work);
   }
}

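/* Append the fence to the screen's pending-fence list (the list holds a
 * reference) and emit the fence command that will write its sequence number.
 */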
static void
_nouveau_fence_emit(struct nouveau_fence *fence)
{
   struct nouveau_screen *screen = fence->screen;
   struct nouveau_fence_list *fence_list = &screen->fence;

   simple_mtx_assert_locked(&fence_list->lock);

   assert(fence->state != NOUVEAU_FENCE_STATE_EMITTING);
   if (fence->state >= NOUVEAU_FENCE_STATE_EMITTED)
      return;

   /* set this now, so that if fence.emit triggers a flush we don't recurse */
   fence->state = NOUVEAU_FENCE_STATE_EMITTING;

   p_atomic_inc(&fence->ref);

   if (fence_list->tail)
      fence_list->tail->next = fence;
   else
      fence_list->head = fence;

   fence_list->tail = fence;

   fence_list->emit(&fence->context->pipe, &fence->sequence, fence->bo);

   assert(fence->state == NOUVEAU_FENCE_STATE_EMITTING);
   fence->state = NOUVEAU_FENCE_STATE_EMITTED;
}

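/* Unlink the fence from the pending list (if it was emitted or flushed), run
 * any leftover work callbacks and release its buffer object.
 */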
static void
nouveau_fence_del(struct nouveau_fence *fence)
{
   struct nouveau_fence *it;
   struct nouveau_fence_list *fence_list = &fence->screen->fence;

   simple_mtx_assert_locked(&fence_list->lock);

   if (fence->state == NOUVEAU_FENCE_STATE_EMITTED ||
       fence->state == NOUVEAU_FENCE_STATE_FLUSHED) {
      if (fence == fence_list->head) {
         fence_list->head = fence->next;
         if (!fence_list->head)
            fence_list->tail = NULL;
      } else {
         for (it = fence_list->head; it && it->next != fence; it = it->next);
         it->next = fence->next;
         if (fence_list->tail == fence)
            fence_list->tail = it;
      }
   }

   if (!list_is_empty(&fence->work)) {
      debug_printf("WARNING: deleting fence with work still pending !\n");
      nouveau_fence_trigger_work(fence);
   }

   nouveau_bo_ref(NULL, &fence->bo);
   FREE(fence);
}

void
nouveau_fence_cleanup(struct nouveau_context *nv)
{
   if (nv->fence) {
      struct nouveau_fence_list *fence_list = &nv->screen->fence;
      struct nouveau_fence *current = NULL;

      /* nouveau_fence_wait will create a new current fence, so wait on the
       * _current_ one, and remove both.
       */
      simple_mtx_lock(&fence_list->lock);
      _nouveau_fence_ref(nv->fence, &current);
      _nouveau_fence_wait(current, NULL);
      _nouveau_fence_ref(NULL, &current);
      _nouveau_fence_ref(NULL, &nv->fence);
      simple_mtx_unlock(&fence_list->lock);
   }
}

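/* Read the last acknowledged sequence number from the kernel and mark every
 * pending fence up to it as signalled, running its work and dropping the
 * list's reference. If 'flushed' is set, the remaining emitted fences are
 * promoted to the FLUSHED state.
 */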
void
_nouveau_fence_update(struct nouveau_screen *screen, bool flushed)
{
   struct nouveau_fence *fence;
   struct nouveau_fence *next = NULL;
   struct nouveau_fence_list *fence_list = &screen->fence;
   u32 sequence = fence_list->update(&screen->base);

   simple_mtx_assert_locked(&fence_list->lock);

   /* If running under drm-shim, let all fences be signalled so things run to
    * completion (avoids a hang at the end of shader-db).
    */
   if (unlikely(screen->disable_fences))
      sequence = screen->fence.sequence;

   if (fence_list->sequence_ack == sequence)
      return;
   fence_list->sequence_ack = sequence;

   for (fence = fence_list->head; fence; fence = next) {
      next = fence->next;
      sequence = fence->sequence;

      fence->state = NOUVEAU_FENCE_STATE_SIGNALLED;

      nouveau_fence_trigger_work(fence);
      _nouveau_fence_ref(NULL, &fence);

      if (sequence == fence_list->sequence_ack)
         break;
   }
   fence_list->head = next;
   if (!next)
      fence_list->tail = NULL;

   if (flushed) {
      for (fence = next; fence; fence = fence->next)
         if (fence->state == NOUVEAU_FENCE_STATE_EMITTED)
            fence->state = NOUVEAU_FENCE_STATE_FLUSHED;
   }
}

#define NOUVEAU_FENCE_MAX_SPINS (1 << 31)

static bool
_nouveau_fence_signalled(struct nouveau_fence *fence)
{
   struct nouveau_screen *screen = fence->screen;

   simple_mtx_assert_locked(&screen->fence.lock);

   if (fence->state == NOUVEAU_FENCE_STATE_SIGNALLED)
      return true;

   if (fence->state >= NOUVEAU_FENCE_STATE_EMITTED)
      _nouveau_fence_update(screen, false);

   return fence->state == NOUVEAU_FENCE_STATE_SIGNALLED;
}

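/* Make sure the fence can eventually signal: emit it if it has not been
 * emitted yet and flush the pushbuf if it has not been flushed. Returns false
 * if the pushbuf kickoff fails.
 */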
static bool
nouveau_fence_kick(struct nouveau_fence *fence)
{
   struct nouveau_context *context = fence->context;
   struct nouveau_screen *screen = fence->screen;
   struct nouveau_fence_list *fence_list = &screen->fence;
   bool current = !fence->sequence;

   simple_mtx_assert_locked(&fence_list->lock);

   /* wtf, someone is waiting on a fence in flush_notify handler? */
   assert(fence->state != NOUVEAU_FENCE_STATE_EMITTING);

   if (fence->state < NOUVEAU_FENCE_STATE_EMITTED) {
      if (PUSH_AVAIL(context->pushbuf) < 16)
         nouveau_pushbuf_space(context->pushbuf, 16, 0, 0);
      _nouveau_fence_emit(fence);
   }

   if (fence->state < NOUVEAU_FENCE_STATE_FLUSHED) {
      if (nouveau_pushbuf_kick(context->pushbuf))
         return false;
   }

   if (current)
      _nouveau_fence_next(fence->context);

   _nouveau_fence_update(screen, false);

   return true;
}

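/* Block until the fence has signalled, using a kernel wait on the fence BO.
 * Reports the stall time through the debug callback if one is provided.
 */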
static bool
_nouveau_fence_wait(struct nouveau_fence *fence, struct util_debug_callback *debug)
{
   struct nouveau_screen *screen = fence->screen;
   struct nouveau_fence_list *fence_list = &screen->fence;
   int64_t start = 0;

   simple_mtx_assert_locked(&fence_list->lock);

   if (debug && debug->debug_message)
      start = os_time_get_nano();

   if (!nouveau_fence_kick(fence))
      return false;

   if (fence->state < NOUVEAU_FENCE_STATE_SIGNALLED) {
      NOUVEAU_DRV_STAT(screen, any_non_kernel_fence_sync_count, 1);
      int ret = nouveau_bo_wait(fence->bo, NOUVEAU_BO_RDWR, screen->client);
      if (ret) {
         debug_printf("Wait on fence %u (ack = %u, next = %u) errored with %s !\n",
                      fence->sequence,
                      fence_list->sequence_ack, fence_list->sequence, strerror(ret));
         return false;
      }

      _nouveau_fence_update(screen, false);
      if (fence->state != NOUVEAU_FENCE_STATE_SIGNALLED)
         return false;

      if (debug && debug->debug_message)
         util_debug_message(debug, PERF_INFO,
                            "stalled %.3f ms waiting for fence",
                            (os_time_get_nano() - start) / 1000000.f);
   }

   return true;
}

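/* Advance the context to a fresh current fence. If the old fence has not been
 * emitted and nobody else holds a reference to it, it is simply kept;
 * otherwise it is emitted if needed, the context's reference is dropped and a
 * new fence is allocated.
 */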
void
_nouveau_fence_next(struct nouveau_context *nv)
{
   struct nouveau_fence_list *fence_list = &nv->screen->fence;

   simple_mtx_assert_locked(&fence_list->lock);

   if (nv->fence->state < NOUVEAU_FENCE_STATE_EMITTING) {
      if (p_atomic_read(&nv->fence->ref) > 1)
         _nouveau_fence_emit(nv->fence);
      else
         return;
   }

   _nouveau_fence_ref(NULL, &nv->fence);

   nouveau_fence_new(nv, &nv->fence);
}

void
nouveau_fence_unref_bo(void *data)
{
   struct nouveau_bo *bo = data;

   nouveau_bo_ref(NULL, &bo);
}

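/* Schedule a callback to run when the fence signals. If the fence is already
 * signalled (or NULL), the callback runs immediately. The fence is kicked once
 * too much work accumulates, so callbacks cannot pile up indefinitely.
 */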
bool
nouveau_fence_work(struct nouveau_fence *fence,
                   void (*func)(void *), void *data)
{
   struct nouveau_fence_work *work;
   struct nouveau_screen *screen;

   if (!fence || fence->state == NOUVEAU_FENCE_STATE_SIGNALLED) {
      func(data);
      return true;
   }

   work = CALLOC_STRUCT(nouveau_fence_work);
   if (!work)
      return false;
   work->func = func;
   work->data = data;

   /* the fence might get deleted by fence_kick */
   screen = fence->screen;

   simple_mtx_lock(&screen->fence.lock);
   list_add(&work->list, &fence->work);
   if (++fence->work_count > 64)
      nouveau_fence_kick(fence);
   simple_mtx_unlock(&screen->fence.lock);
   return true;
}

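/* Update a fence reference: take a reference on the new fence, drop the one
 * held through *ref (deleting that fence when its refcount reaches zero) and
 * store the new pointer.
 */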
void
_nouveau_fence_ref(struct nouveau_fence *fence, struct nouveau_fence **ref)
{
   if (fence)
      p_atomic_inc(&fence->ref);

   if (*ref) {
      simple_mtx_assert_locked(&(*ref)->screen->fence.lock);
      if (p_atomic_dec_zero(&(*ref)->ref))
         nouveau_fence_del(*ref);
   }

   *ref = fence;
}

void
nouveau_fence_ref(struct nouveau_fence *fence, struct nouveau_fence **ref)
{
   struct nouveau_fence_list *fence_list = NULL;
   if (ref && *ref)
      fence_list = &(*ref)->screen->fence;

   if (fence_list)
      simple_mtx_lock(&fence_list->lock);

   _nouveau_fence_ref(fence, ref);

   if (fence_list)
      simple_mtx_unlock(&fence_list->lock);
}

bool
nouveau_fence_wait(struct nouveau_fence *fence, struct util_debug_callback *debug)
{
   struct nouveau_fence_list *fence_list = &fence->screen->fence;
   simple_mtx_lock(&fence_list->lock);
   bool res = _nouveau_fence_wait(fence, debug);
   simple_mtx_unlock(&fence_list->lock);
   return res;
}

void
nouveau_fence_next_if_current(struct nouveau_context *nv, struct nouveau_fence *fence)
{
   simple_mtx_lock(&fence->screen->fence.lock);
   if (nv->fence == fence)
      _nouveau_fence_next(nv);
   simple_mtx_unlock(&fence->screen->fence.lock);
}

bool
nouveau_fence_signalled(struct nouveau_fence *fence)
{
   simple_mtx_lock(&fence->screen->fence.lock);
   bool ret = _nouveau_fence_signalled(fence);
   simple_mtx_unlock(&fence->screen->fence.lock);
   return ret;
}