/*
 * Copyright © 2016 Rob Clark <[email protected]>
 * SPDX-License-Identifier: MIT
 *
 * Authors:
 *    Rob Clark <[email protected]>
 */

#include "util/hash_table.h"
#include "util/list.h"
#include "util/set.h"
#include "util/u_string.h"

#include "freedreno_batch.h"
#include "freedreno_context.h"
#include "freedreno_fence.h"
#include "freedreno_query_hw.h"
#include "freedreno_resource.h"

static struct fd_ringbuffer *
alloc_ring(struct fd_batch *batch, unsigned sz, enum fd_ringbuffer_flags flags)
{
   struct fd_context *ctx = batch->ctx;

   /* if kernel is too old to support unlimited # of cmd buffers, we
    * have no option but to allocate large worst-case sizes so that
    * we don't need to grow the ringbuffer.  Performance is likely to
    * suffer, but there is no good alternative.
    *
    * Otherwise if supported, allocate a growable ring with initial
    * size of zero.
    */
   if ((fd_device_version(ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS) &&
       !FD_DBG(NOGROW)) {
      flags |= FD_RINGBUFFER_GROWABLE;
      sz = 0;
   }

   return fd_submit_new_ringbuffer(batch->submit, sz, flags);
}

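/* Allocate a new subpass with its own draw ring and append it to the
 * batch's subpass list:
 */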
static struct fd_batch_subpass *
subpass_create(struct fd_batch *batch)
{
   struct fd_batch_subpass *subpass = CALLOC_STRUCT(fd_batch_subpass);

   subpass->draw = alloc_ring(batch, 0x100000, 0);

   /* Replace batch->draw with reference to current subpass, for
    * backwards compat with code that is not subpass aware.
    */
   if (batch->draw)
      fd_ringbuffer_del(batch->draw);
   batch->draw = fd_ringbuffer_ref(subpass->draw);

   list_addtail(&subpass->node, &batch->subpasses);

   return subpass;
}

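/* Drop the subpass's rings and lrz buffer and unlink it from the batch: */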
static void
subpass_destroy(struct fd_batch_subpass *subpass)
{
   fd_ringbuffer_del(subpass->draw);
   if (subpass->subpass_clears)
      fd_ringbuffer_del(subpass->subpass_clears);
   list_del(&subpass->node);
   if (subpass->lrz)
      fd_bo_del(subpass->lrz);
   free(subpass);
}

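/* Create a new batch, along with its submit and initial rings/subpass: */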
struct fd_batch *
fd_batch_create(struct fd_context *ctx, bool nondraw)
{
   struct fd_batch *batch = CALLOC_STRUCT(fd_batch);

   if (!batch)
      return NULL;

   DBG("%p", batch);

   pipe_reference_init(&batch->reference, 1);
   batch->ctx = ctx;
   batch->nondraw = nondraw;

   batch->resources =
      _mesa_set_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);

   list_inithead(&batch->subpasses);

   batch->submit = fd_submit_new(ctx->pipe);
   if (batch->nondraw) {
      batch->gmem = alloc_ring(batch, 0x1000, FD_RINGBUFFER_PRIMARY);
   } else {
      batch->gmem = alloc_ring(batch, 0x100000, FD_RINGBUFFER_PRIMARY);

      /* a6xx+ re-uses draw rb for both draw and binning pass: */
      if (ctx->screen->gen < 6) {
         batch->binning = alloc_ring(batch, 0x100000, 0);
      }
   }

   /* Pre-attach private BOs: */
   for (unsigned i = 0; i < ctx->num_private_bos; i++)
      fd_ringbuffer_attach_bo(batch->gmem, ctx->private_bos[i]);

   batch->subpass = subpass_create(batch);

   batch->in_fence_fd = -1;
   batch->fence = NULL;

   /* Work around problems on earlier gens with submit merging, etc,
    * by always creating a fence to request that the submit is flushed
    * immediately:
    */
   if (ctx->screen->gen < 6)
      batch->fence = fd_pipe_fence_create(batch);

   fd_reset_wfi(batch);

   util_dynarray_init(&batch->draw_patches, NULL);
   util_dynarray_init(&(batch->fb_read_patches), NULL);

   if (is_a2xx(ctx->screen)) {
      util_dynarray_init(&batch->shader_patches, NULL);
      util_dynarray_init(&batch->gmem_patches, NULL);
   }

   if (is_a3xx(ctx->screen))
      util_dynarray_init(&batch->rbrc_patches, NULL);

   util_dynarray_init(&batch->samples, NULL);

   u_trace_init(&batch->trace, &ctx->trace_context);
   batch->last_timestamp_cmd = NULL;

   return batch;
}

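/* Start a new subpass within the batch, which becomes the current subpass: */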
struct fd_batch_subpass *
fd_batch_create_subpass(struct fd_batch *batch)
{
   assert(!batch->nondraw);

   struct fd_batch_subpass *subpass = subpass_create(batch);

   /* The new subpass inherits the current subpass's lrz state; this is
    * replaced if there is a depth clear.
    */
   if (batch->subpass->lrz)
      subpass->lrz = fd_bo_ref(batch->subpass->lrz);

   batch->subpass = subpass;

   return subpass;
}

/**
 * Cleanup that we normally do when the submit is flushed, like dropping
 * rb references.  But also called when batch is destroyed just in case
 * it wasn't flushed.
 */
static void
cleanup_submit(struct fd_batch *batch)
{
   if (!batch->submit)
      return;

   foreach_subpass_safe (subpass, batch) {
      subpass_destroy(subpass);
   }

   fd_ringbuffer_del(batch->draw);
   fd_ringbuffer_del(batch->gmem);

   if (batch->binning) {
      fd_ringbuffer_del(batch->binning);
      batch->binning = NULL;
   }

   if (batch->prologue) {
      fd_ringbuffer_del(batch->prologue);
      batch->prologue = NULL;
   }

   if (batch->tile_epilogue) {
      fd_ringbuffer_del(batch->tile_epilogue);
      batch->tile_epilogue = NULL;
   }

   if (batch->epilogue) {
      fd_ringbuffer_del(batch->epilogue);
      batch->epilogue = NULL;
   }

   if (batch->tile_loads) {
      fd_ringbuffer_del(batch->tile_loads);
      batch->tile_loads = NULL;
   }

   if (batch->tile_store) {
      fd_ringbuffer_del(batch->tile_store);
      batch->tile_store = NULL;
   }

   fd_submit_del(batch->submit);
   batch->submit = NULL;
}

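/* Flush each batch that this batch depends on, then clear the dependency
 * mask:
 */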
static void
batch_flush_dependencies(struct fd_batch *batch) assert_dt
{
   struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
   struct fd_batch *dep;

   foreach_batch (dep, cache, batch->dependents_mask) {
      assert(dep->ctx == batch->ctx);
      fd_batch_flush(dep);
      fd_batch_reference(&dep, NULL);
   }

   batch->dependents_mask = 0;
}

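/* Drop dependent batch references without flushing them: */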
static void
batch_reset_dependencies(struct fd_batch *batch)
{
   struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
   struct fd_batch *dep;

   foreach_batch (dep, cache, batch->dependents_mask) {
      fd_batch_reference(&dep, NULL);
   }

   batch->dependents_mask = 0;
}

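/* Remove all resources from the batch's tracking set, clearing their
 * per-batch tracking state:
 */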
static void
batch_reset_resources(struct fd_batch *batch)
{
   fd_screen_assert_locked(batch->ctx->screen);

   set_foreach (batch->resources, entry) {
      struct fd_resource *rsc = (struct fd_resource *)entry->key;
      _mesa_set_remove(batch->resources, entry);
      assert(rsc->track->batch_mask & (1 << batch->idx));
      rsc->track->batch_mask &= ~(1 << batch->idx);
      if (rsc->track->write_batch == batch)
         fd_batch_reference_locked(&rsc->track->write_batch, NULL);
   }
}

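/* Called when the last reference to the batch is dropped, with the screen
 * lock held:
 */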
void
__fd_batch_destroy_locked(struct fd_batch *batch)
{
   struct fd_context *ctx = batch->ctx;

   DBG("%p", batch);

   fd_screen_assert_locked(batch->ctx->screen);

   fd_bc_invalidate_batch(batch, true);

   batch_reset_resources(batch);
   assert(batch->resources->entries == 0);
   _mesa_set_destroy(batch->resources, NULL);

   fd_screen_unlock(ctx->screen);
   batch_reset_dependencies(batch);
   assert(batch->dependents_mask == 0);

   util_copy_framebuffer_state(&batch->framebuffer, NULL);

   pipe_resource_reference(&batch->query_buf, NULL);

   if (batch->in_fence_fd != -1)
      close(batch->in_fence_fd);

   /* in case batch wasn't flushed but fence was created: */
   if (batch->fence)
      fd_pipe_fence_set_batch(batch->fence, NULL);

   fd_pipe_fence_ref(&batch->fence, NULL);

   cleanup_submit(batch);

   util_dynarray_fini(&batch->draw_patches);
   util_dynarray_fini(&(batch->fb_read_patches));

   if (is_a2xx(batch->ctx->screen)) {
      util_dynarray_fini(&batch->shader_patches);
      util_dynarray_fini(&batch->gmem_patches);
   }

   if (is_a3xx(batch->ctx->screen))
      util_dynarray_fini(&batch->rbrc_patches);

   while (batch->samples.size > 0) {
      struct fd_hw_sample *samp =
         util_dynarray_pop(&batch->samples, struct fd_hw_sample *);
      fd_hw_sample_reference(batch->ctx, &samp, NULL);
   }
   util_dynarray_fini(&batch->samples);

   u_trace_fini(&batch->trace);

   free(batch->key);
   free(batch);
   fd_screen_lock(ctx->screen);
}

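/* Locking wrapper for __fd_batch_destroy_locked(): */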
void
__fd_batch_destroy(struct fd_batch *batch)
{
   struct fd_screen *screen = batch->ctx->screen;
   fd_screen_lock(screen);
   __fd_batch_destroy_locked(batch);
   fd_screen_unlock(screen);
}

void
__fd_batch_describe(char *buf, const struct fd_batch *batch)
{
   sprintf(buf, "fd_batch<%u>", batch->seqno);
}

/* Get per-batch prologue */
struct fd_ringbuffer *
fd_batch_get_prologue(struct fd_batch *batch)
{
   if (!batch->prologue)
      batch->prologue = alloc_ring(batch, 0x1000, 0);
   return batch->prologue;
}

/* Only called from fd_batch_flush() */
static void
batch_flush(struct fd_batch *batch) assert_dt
{
   DBG("%p: needs_flush=%d", batch, batch->needs_flush);

   if (batch->flushed)
      return;

   tc_assert_driver_thread(batch->ctx->tc);

   batch->needs_flush = false;

   /* close out the draw cmds by making sure any active queries are
    * paused:
    */
   fd_batch_finish_queries(batch);

   batch_flush_dependencies(batch);

   fd_screen_lock(batch->ctx->screen);
   batch_reset_resources(batch);
   /* NOTE: remove=false removes the batch from the hashtable, so future
    * lookups won't cache-hit a flushed batch, but leaves the weak reference
    * to the batch to avoid having multiple batches with same batch->idx, as
    * that causes all sorts of hilarity.
    */
   fd_bc_invalidate_batch(batch, false);
   batch->flushed = true;

   if (batch == batch->ctx->batch)
      fd_batch_reference_locked(&batch->ctx->batch, NULL);

   if (batch == batch->ctx->batch_nondraw)
      fd_batch_reference_locked(&batch->ctx->batch_nondraw, NULL);

   fd_screen_unlock(batch->ctx->screen);

   if (batch->fence)
      fd_pipe_fence_ref(&batch->ctx->last_fence, batch->fence);

   fd_gmem_render_tiles(batch);

   assert(batch->reference.count > 0);

   cleanup_submit(batch);
}

void
fd_batch_set_fb(struct fd_batch *batch, const struct pipe_framebuffer_state *pfb)
{
   assert(!batch->nondraw);

   util_copy_framebuffer_state(&batch->framebuffer, pfb);

   if (!pfb->zsbuf)
      return;

   struct fd_resource *zsbuf = fd_resource(pfb->zsbuf->texture);

   /* Switching back to a batch we'd previously started constructing shouldn't
    * result in a different lrz.  The dependency tracking should avoid another
    * batch writing/clearing our depth buffer.
    */
   if (batch->subpass->lrz) {
      assert(batch->subpass->lrz == zsbuf->lrz);
   } else if (zsbuf->lrz) {
      batch->subpass->lrz = fd_bo_ref(zsbuf->lrz);
   }
}

/* NOTE: could drop the last ref to batch
 */
void
fd_batch_flush(struct fd_batch *batch)
{
   struct fd_batch *tmp = NULL;

   /* NOTE: we need to hold an extra ref across the body of flush,
    * since the last ref to this batch could be dropped when cleaning
    * up used_resources
    */
   fd_batch_reference(&tmp, batch);
   batch_flush(tmp);
   fd_batch_reference(&tmp, NULL);
}

/* find a batch's dependents mask, including recursive dependencies: */
static uint32_t
recursive_dependents_mask(struct fd_batch *batch)
{
   struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
   struct fd_batch *dep;
   uint32_t dependents_mask = batch->dependents_mask;

   foreach_batch (dep, cache, batch->dependents_mask)
      dependents_mask |= recursive_dependents_mask(dep);

   return dependents_mask;
}

bool
fd_batch_has_dep(struct fd_batch *batch, struct fd_batch *dep)
{
   return !!(batch->dependents_mask & (1 << dep->idx));
}

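/* Record that batch depends on dep, so dep gets flushed before batch: */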
void
fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
{
   fd_screen_assert_locked(batch->ctx->screen);

   assert(batch->ctx == dep->ctx);

   if (fd_batch_has_dep(batch, dep))
      return;

   /* a loop should not be possible */
   assert(!((1 << batch->idx) & recursive_dependents_mask(dep)));

   struct fd_batch *other = NULL;
   fd_batch_reference_locked(&other, dep);
   batch->dependents_mask |= (1 << dep->idx);
   DBG("%p: added dependency on %p", batch, dep);
}

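/* Flush the batch that is currently writing rsc, temporarily dropping the
 * screen lock around the flush:
 */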
static void
flush_write_batch(struct fd_resource *rsc) assert_dt
{
   struct fd_batch *b = NULL;
   fd_batch_reference_locked(&b, rsc->track->write_batch);

   fd_screen_unlock(b->ctx->screen);
   fd_batch_flush(b);
   fd_screen_lock(b->ctx->screen);

   fd_batch_reference_locked(&b, NULL);
}

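/* Add rsc to the batch's resource set and attach its backing BO(s) to the
 * draw ring:
 */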
static void
fd_batch_add_resource(struct fd_batch *batch, struct fd_resource *rsc)
{
   if (likely(fd_batch_references_resource(batch, rsc))) {
      assert(_mesa_set_search_pre_hashed(batch->resources, rsc->hash, rsc));
      return;
   }

   assert(!_mesa_set_search(batch->resources, rsc));

   _mesa_set_add_pre_hashed(batch->resources, rsc->hash, rsc);
   rsc->track->batch_mask |= (1 << batch->idx);

   fd_ringbuffer_attach_bo(batch->draw, rsc->bo);
   if (unlikely(rsc->b.b.next)) {
      struct fd_resource *n = fd_resource(rsc->b.b.next);
      fd_ringbuffer_attach_bo(batch->draw, n->bo);
   }
}

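/* Mark rsc as written by batch, ordering any other batches that access it
 * ahead of this one:
 */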
void
fd_batch_resource_write(struct fd_batch *batch, struct fd_resource *rsc)
{
   struct fd_resource_tracking *track = rsc->track;

   fd_screen_assert_locked(batch->ctx->screen);

   DBG("%p: write %p", batch, rsc);

   /* Must do this before the early out, so we unset a previous resource
    * invalidate (which may have left the write_batch state in place).
    */
   rsc->valid = true;

   if (track->write_batch == batch)
      return;

   if (rsc->stencil)
      fd_batch_resource_write(batch, rsc->stencil);

   /* note, invalidate write batch, to avoid further writes to rsc
    * resulting in a write-after-read hazard.
    */

   /* if we are pending read or write by any other batch, they need to
    * be ordered before the current batch:
    */
   if (unlikely(track->batch_mask & ~(1 << batch->idx))) {
      struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
      struct fd_batch *dep;

      if (track->write_batch) {
         /* Cross-context writes without flush/barrier are undefined.
          * Let's simply protect ourselves from crashing by avoiding cross-
          * ctx dependencies and let the app have the undefined behavior
          * it asked for:
          */
         if (track->write_batch->ctx != batch->ctx) {
            fd_ringbuffer_attach_bo(batch->draw, rsc->bo);
            return;
         }

         flush_write_batch(rsc);
      }

      foreach_batch (dep, cache, track->batch_mask) {
         struct fd_batch *b = NULL;
         if ((dep == batch) || (dep->ctx != batch->ctx))
            continue;
         /* note that batch_add_dep could flush and unref dep, so
          * we need to hold a reference to keep it live for the
          * fd_bc_invalidate_batch()
          */
         fd_batch_reference(&b, dep);
         fd_batch_add_dep(batch, b);
         fd_bc_invalidate_batch(b, false);
         fd_batch_reference_locked(&b, NULL);
      }
   }
   fd_batch_reference_locked(&track->write_batch, batch);

   fd_batch_add_resource(batch, rsc);

   fd_batch_write_prep(batch, rsc);
}

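/* Track a read of rsc by batch, flushing the pending writer (if any) first: */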
void
fd_batch_resource_read_slowpath(struct fd_batch *batch, struct fd_resource *rsc)
{
   fd_screen_assert_locked(batch->ctx->screen);

   if (rsc->stencil)
      fd_batch_resource_read(batch, rsc->stencil);

   DBG("%p: read %p", batch, rsc);

   struct fd_resource_tracking *track = rsc->track;

   /* If reading a resource pending a write, go ahead and flush the
    * writer.  This avoids situations where we end up having to
    * flush the current batch in _resource_used()
    */
   if (unlikely(track->write_batch && track->write_batch != batch)) {
      if (track->write_batch->ctx != batch->ctx) {
         /* Reading results from another context without flush/barrier
          * is undefined.  Let's simply protect ourselves from crashing
          * by avoiding cross-ctx dependencies and let the app have the
          * undefined behavior it asked for:
          */
         fd_ringbuffer_attach_bo(batch->draw, rsc->bo);
         return;
      }

      flush_write_batch(rsc);
   }

   fd_batch_add_resource(batch, rsc);
}

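/* Flush the batch if it has accumulated too many draws or its cmdstream
 * has grown too large:
 */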
void
fd_batch_check_size(struct fd_batch *batch)
{
   if (batch->num_draws > 100000) {
      fd_batch_flush(batch);
      return;
   }

   /* Place a reasonable upper bound on prim/draw stream buffer size: */
   const unsigned limit_bits = 8 * 8 * 1024 * 1024;
   if ((batch->prim_strm_bits > limit_bits) ||
       (batch->draw_strm_bits > limit_bits)) {
      fd_batch_flush(batch);
      return;
   }

   if (!fd_ringbuffer_check_size(batch->draw))
      fd_batch_flush(batch);
}

/* emit a WAIT_FOR_IDLE only if needed, i.e. if there has not already
 * been one since last draw:
 */
void
fd_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
   if (batch->needs_wfi) {
      if (batch->ctx->screen->gen >= 5)
         OUT_WFI5(ring);
      else
         OUT_WFI(ring);
      batch->needs_wfi = false;
   }
}