/*
 * Copyright © 2012 Rob Clark <[email protected]>
 * SPDX-License-Identifier: MIT
 *
 * Authors:
 *    Rob Clark <[email protected]>
 */

#include "pipe/p_state.h"
#include "util/format/u_format.h"
#include "util/u_draw.h"
#include "util/u_helpers.h"
#include "util/u_memory.h"
#include "util/u_prim.h"
#include "util/u_string.h"

#include "freedreno_blitter.h"
#include "freedreno_context.h"
#include "freedreno_draw.h"
#include "freedreno_fence.h"
#include "freedreno_query_acc.h"
#include "freedreno_query_hw.h"
#include "freedreno_resource.h"
#include "freedreno_state.h"
#include "freedreno_util.h"

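/* Check whether the batch already tracks a reference to the resource: */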
static bool
batch_references_resource(struct fd_batch *batch, struct pipe_resource *prsc)
   assert_dt
{
   return fd_batch_references_resource(batch, fd_resource(prsc));
}

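/* Mark a resource as read by the batch (no-op for a NULL resource): */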
static void
resource_read(struct fd_batch *batch, struct pipe_resource *prsc) assert_dt
{
   if (!prsc)
      return;
   fd_batch_resource_read(batch, fd_resource(prsc));
}

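/* Mark a resource as written by the batch (no-op for a NULL resource): */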
static void
resource_written(struct fd_batch *batch, struct pipe_resource *prsc) assert_dt
{
   if (!prsc)
      return;
   fd_batch_resource_write(batch, fd_resource(prsc));
}

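/* Update the batch's read/write resource tracking and gmem restore/resolve
 * state for whatever state has been marked dirty since the last draw:
 */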
static void
batch_draw_tracking_for_dirty_bits(struct fd_batch *batch) assert_dt
{
   struct fd_context *ctx = batch->ctx;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   enum fd_dirty_3d_state dirty = ctx->dirty_resource;
   unsigned buffers = 0, restore_buffers = 0;

   if (dirty & (FD_DIRTY_FRAMEBUFFER | FD_DIRTY_ZSA)) {
      if (fd_depth_enabled(ctx)) {
         if (fd_resource(pfb->zsbuf->texture)->valid) {
            restore_buffers |= FD_BUFFER_DEPTH;
            /* storing packed d/s depth also stores stencil, so we need
             * the stencil restored too to avoid invalidating it.
             */
            if (pfb->zsbuf->texture->format == PIPE_FORMAT_Z24_UNORM_S8_UINT)
               restore_buffers |= FD_BUFFER_STENCIL;
         } else {
            batch->invalidated |= FD_BUFFER_DEPTH;
         }
         batch->gmem_reason |= FD_GMEM_DEPTH_ENABLED;
         if (fd_depth_write_enabled(ctx)) {
            buffers |= FD_BUFFER_DEPTH;
            resource_written(batch, pfb->zsbuf->texture);
         } else {
            resource_read(batch, pfb->zsbuf->texture);
         }
      }

      if (fd_stencil_enabled(ctx)) {
         if (fd_resource(pfb->zsbuf->texture)->valid) {
            restore_buffers |= FD_BUFFER_STENCIL;
            /* storing packed d/s stencil also stores depth, so we need
             * the depth restored too to avoid invalidating it.
             */
            if (pfb->zsbuf->texture->format == PIPE_FORMAT_Z24_UNORM_S8_UINT)
               restore_buffers |= FD_BUFFER_DEPTH;
         } else {
            batch->invalidated |= FD_BUFFER_STENCIL;
         }
         batch->gmem_reason |= FD_GMEM_STENCIL_ENABLED;
         buffers |= FD_BUFFER_STENCIL;
         resource_written(batch, pfb->zsbuf->texture);
      }
   }

   if (dirty & FD_DIRTY_FRAMEBUFFER) {
      for (unsigned i = 0; i < pfb->nr_cbufs; i++) {
         struct pipe_resource *surf;

         if (!pfb->cbufs[i])
            continue;

         surf = pfb->cbufs[i]->texture;

         if (fd_resource(surf)->valid) {
            restore_buffers |= PIPE_CLEAR_COLOR0 << i;
         } else {
            batch->invalidated |= PIPE_CLEAR_COLOR0 << i;
         }

         buffers |= PIPE_CLEAR_COLOR0 << i;

         resource_written(batch, pfb->cbufs[i]->texture);
      }
   }

   if (dirty & (FD_DIRTY_CONST | FD_DIRTY_TEX | FD_DIRTY_SSBO | FD_DIRTY_IMAGE)) {
      u_foreach_bit (s, ctx->bound_shader_stages) {
         enum fd_dirty_shader_state dirty_shader = ctx->dirty_shader_resource[s];

         /* Mark constbuf as being read: */
         if (dirty_shader & FD_DIRTY_SHADER_CONST) {
            u_foreach_bit (i, ctx->constbuf[s].enabled_mask)
               resource_read(batch, ctx->constbuf[s].cb[i].buffer);
         }

         /* Mark textures as being read */
         if (dirty_shader & FD_DIRTY_SHADER_TEX) {
            u_foreach_bit (i, ctx->tex[s].valid_textures)
               resource_read(batch, ctx->tex[s].textures[i]->texture);
         }

         /* Mark SSBOs as being read or written: */
         if (dirty_shader & FD_DIRTY_SHADER_SSBO) {
            const struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[s];

            u_foreach_bit (i, so->enabled_mask & so->writable_mask)
               resource_written(batch, so->sb[i].buffer);

            u_foreach_bit (i, so->enabled_mask & ~so->writable_mask)
               resource_read(batch, so->sb[i].buffer);
         }

         /* Mark Images as being read or written: */
         if (dirty_shader & FD_DIRTY_SHADER_IMAGE) {
            u_foreach_bit (i, ctx->shaderimg[s].enabled_mask) {
               struct pipe_image_view *img = &ctx->shaderimg[s].si[i];
               if (img->access & PIPE_IMAGE_ACCESS_WRITE)
                  resource_written(batch, img->resource);
               else
                  resource_read(batch, img->resource);
            }
         }
      }
   }

   /* Mark VBOs as being read */
   if (dirty & FD_DIRTY_VTXBUF) {
      u_foreach_bit (i, ctx->vtx.vertexbuf.enabled_mask) {
         assert(!ctx->vtx.vertexbuf.vb[i].is_user_buffer);
         resource_read(batch, ctx->vtx.vertexbuf.vb[i].buffer.resource);
      }
   }

   /* Mark streamout buffers as being written.. */
   if (dirty & FD_DIRTY_STREAMOUT) {
      for (unsigned i = 0; i < ctx->streamout.num_targets; i++) {
         struct fd_stream_output_target *target =
            fd_stream_output_target(ctx->streamout.targets[i]);

         if (target) {
            resource_written(batch, target->base.buffer);
            resource_written(batch, target->offset_buf);
         }
      }
   }

   if (dirty & FD_DIRTY_QUERY) {
      list_for_each_entry (struct fd_acc_query, aq, &ctx->acc_active_queries, node) {
         resource_written(batch, aq->prsc);
      }
   }

   /* any buffers that haven't been cleared yet, we need to restore: */
   batch->restore |= restore_buffers & (FD_BUFFER_ALL & ~batch->invalidated);
   /* and any buffers used, need to be resolved: */
   batch->resolve |= buffers;
}

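/* Check whether this draw touches any resources that the batch is not
 * already tracking, in which case the full (screen-locked) tracking path
 * is needed:
 */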
static bool
needs_draw_tracking(struct fd_batch *batch, const struct pipe_draw_info *info,
                    const struct pipe_draw_indirect_info *indirect)
   assert_dt
{
   struct fd_context *ctx = batch->ctx;

   if (ctx->dirty_resource)
      return true;

   if (info->index_size && !batch_references_resource(batch, info->index.resource))
      return true;

   if (indirect) {
      if (indirect->buffer && !batch_references_resource(batch, indirect->buffer))
         return true;
      if (indirect->indirect_draw_count &&
          !batch_references_resource(batch, indirect->indirect_draw_count))
         return true;
      if (indirect->count_from_stream_output)
         return true;
   }

   return false;
}

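/* Per-draw resource tracking: mark everything the draw reads or writes so
 * batch dependencies and gmem restore/resolve state stay correct:
 */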
static void
batch_draw_tracking(struct fd_batch *batch, const struct pipe_draw_info *info,
                    const struct pipe_draw_indirect_info *indirect) assert_dt
{
   struct fd_context *ctx = batch->ctx;

   if (!needs_draw_tracking(batch, info, indirect))
      goto out;

   /*
    * Figure out the buffers/features we need:
    */

   fd_screen_lock(ctx->screen);

   if (ctx->dirty_resource)
      batch_draw_tracking_for_dirty_bits(batch);

   /* Mark index buffer as being read */
   if (info->index_size)
      resource_read(batch, info->index.resource);

   /* Mark indirect draw buffer as being read */
   if (indirect) {
      resource_read(batch, indirect->buffer);
      resource_read(batch, indirect->indirect_draw_count);
      if (indirect->count_from_stream_output)
         resource_read(
            batch, fd_stream_output_target(indirect->count_from_stream_output)
                      ->offset_buf);
   }

   resource_written(batch, batch->query_buf);

   fd_screen_unlock(ctx->screen);

out:
   fd_batch_update_queries(batch);
}

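/* Update sw draw statistics; prims are only counted in sw on gens that
 * lack the hw counters:
 */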
static void
update_draw_stats(struct fd_context *ctx, const struct pipe_draw_info *info,
                  const struct pipe_draw_start_count_bias *draws,
                  unsigned num_draws) assert_dt
{
   ctx->stats.draw_calls++;

   if (ctx->screen->gen < 6) {
      /* Counting prims in sw doesn't work for GS and tessellation. For older
       * gens we don't have those stages and don't have the hw counters enabled,
       * so keep the count accurate for non-patch geometry.
       */
      unsigned prims = 0;
      if ((info->mode != MESA_PRIM_PATCHES) && (info->mode != MESA_PRIM_COUNT)) {
         for (unsigned i = 0; i < num_draws; i++) {
            prims += u_reduced_prims_for_vertices(info->mode, draws[i].count);
         }
      }

      ctx->stats.prims_generated += prims;

      if (ctx->streamout.num_targets > 0) {
         /* Clip the prims we're writing to the size of the SO buffers. */
         enum mesa_prim tf_prim = u_decomposed_prim(info->mode);
         unsigned verts_written = u_vertices_for_prims(tf_prim, prims);
         unsigned remaining_vert_space =
            ctx->streamout.max_tf_vtx - ctx->streamout.verts_written;
         if (verts_written > remaining_vert_space) {
            verts_written = remaining_vert_space;
            u_trim_pipe_prim(tf_prim, &remaining_vert_space);
         }
         ctx->streamout.verts_written += verts_written;

         ctx->stats.prims_emitted +=
            u_reduced_prims_for_vertices(tf_prim, verts_written);
      }
   }
}

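/* The main draw_vbo entrypoint: handles user index upload, batch selection
 * and resource tracking, then hands off to the per-gen backend:
 */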
static void
fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info,
            unsigned drawid_offset,
            const struct pipe_draw_indirect_info *indirect,
            const struct pipe_draw_start_count_bias *draws, unsigned num_draws) in_dt
{
   struct fd_context *ctx = fd_context(pctx);

   /* for debugging problems with indirect draw, it is convenient
    * to be able to emulate it, to determine if the game is feeding us
    * bogus data:
    */
   if (indirect && indirect->buffer && FD_DBG(NOINDR)) {
      /* num_draws is only applicable for direct draws: */
      assert(num_draws == 1);
      util_draw_indirect(pctx, info, drawid_offset, indirect);
      return;
   }

   /* TODO: push down the region versions into the tiles */
   if (!fd_render_condition_check(pctx))
      return;

   /* Upload a user index buffer. */
   struct pipe_resource *indexbuf = NULL;
   unsigned index_offset = 0;
   struct pipe_draw_info new_info;
   if (info->index_size) {
      if (info->has_user_indices) {
         if (num_draws > 1) {
            util_draw_multi(pctx, info, drawid_offset, indirect, draws, num_draws);
            return;
         }
         if (!util_upload_index_buffer(pctx, info, &draws[0], &indexbuf,
                                       &index_offset, 4))
            return;
         new_info = *info;
         new_info.index.resource = indexbuf;
         new_info.has_user_indices = false;
         info = &new_info;
      } else {
         indexbuf = info->index.resource;
      }
   }

   if ((ctx->streamout.num_targets > 0) && (num_draws > 1)) {
      util_draw_multi(pctx, info, drawid_offset, indirect, draws, num_draws);
      return;
   }

   struct fd_batch *batch = fd_context_batch(ctx);

   batch_draw_tracking(batch, info, indirect);

   while (unlikely(batch->flushed)) {
      /* The current batch was flushed in batch_draw_tracking()
       * so start anew.  We know this won't happen a second time
       * since we are dealing with a fresh batch:
       */
      fd_batch_reference(&batch, NULL);
      batch = fd_context_batch(ctx);
      batch_draw_tracking(batch, info, indirect);
      assert(ctx->batch == batch);
   }

   batch->num_draws++;
   batch->subpass->num_draws++;

   fd_print_dirty_state(ctx->dirty);

   /* Marking the batch as needing flush must come after the batch
    * dependency tracking (resource_read()/resource_written()), as that
    * can trigger a flush
    */
   fd_batch_needs_flush(batch);

   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   DBG("%p: %ux%u num_draws=%u (%s/%s)", batch, pfb->width, pfb->height,
       batch->num_draws,
       util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
       util_format_short_name(pipe_surface_format(pfb->zsbuf)));

   batch->cost += ctx->draw_cost;

   ctx->draw_vbos(ctx, info, drawid_offset, indirect, draws, num_draws, index_offset);

   if (unlikely(ctx->stats_users > 0))
      update_draw_stats(ctx, info, draws, num_draws);

   for (unsigned i = 0; i < ctx->streamout.num_targets; i++) {
      assert(num_draws == 1);
      ctx->streamout.offsets[i] += draws[0].count;
   }

   assert(!batch->flushed);

   fd_batch_check_size(batch);
   fd_batch_reference(&batch, NULL);

   if (info == &new_info)
      pipe_resource_reference(&indexbuf, NULL);
}

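/* Debug variant of the draw entrypoint, which can dirty all state and/or
 * flush after each draw depending on the debug flags:
 */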
static void
fd_draw_vbo_dbg(struct pipe_context *pctx, const struct pipe_draw_info *info,
                unsigned drawid_offset,
                const struct pipe_draw_indirect_info *indirect,
                const struct pipe_draw_start_count_bias *draws, unsigned num_draws)
   in_dt
{
   fd_draw_vbo(pctx, info, drawid_offset, indirect, draws, num_draws);

   if (FD_DBG(DDRAW))
      fd_context_all_dirty(fd_context(pctx));

   if (FD_DBG(FLUSH))
      pctx->flush(pctx, NULL, 0);
}

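/* Resource tracking and cleared/restore bookkeeping for full-surface clears: */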
static void
batch_clear_tracking(struct fd_batch *batch, unsigned buffers) assert_dt
{
   struct fd_context *ctx = batch->ctx;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   unsigned cleared_buffers;

   /* pctx->clear() is only for full-surface clears, so scissor is
    * equivalent to having GL_SCISSOR_TEST disabled:
    */
   batch->max_scissor.minx = 0;
   batch->max_scissor.miny = 0;
   batch->max_scissor.maxx = pfb->width - 1;
   batch->max_scissor.maxy = pfb->height - 1;

   /* for bookkeeping about which buffers have been cleared (and thus
    * can fully or partially skip mem2gmem) we need to ignore buffers
    * that have already had a draw, in case apps do silly things like
    * clear after draw (ie. if you only clear the color buffer, but
    * something like alpha-test causes side effects from the draw in
    * the depth buffer, etc)
    */
   cleared_buffers = buffers & (FD_BUFFER_ALL & ~batch->restore);
   batch->cleared |= buffers;
   batch->invalidated |= cleared_buffers;

   batch->resolve |= buffers;

   fd_screen_lock(ctx->screen);

   if (buffers & PIPE_CLEAR_COLOR)
      for (unsigned i = 0; i < pfb->nr_cbufs; i++)
         if (buffers & (PIPE_CLEAR_COLOR0 << i))
            resource_written(batch, pfb->cbufs[i]->texture);

   if (buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) {
      resource_written(batch, pfb->zsbuf->texture);
      batch->gmem_reason |= FD_GMEM_CLEARS_DEPTH_STENCIL;
   }

   resource_written(batch, batch->query_buf);

   list_for_each_entry (struct fd_acc_query, aq, &ctx->acc_active_queries, node)
      resource_written(batch, aq->prsc);

   fd_screen_unlock(ctx->screen);
}

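/* The clear entrypoint: tries the per-gen ctx->clear() path first, and
 * falls back to the generic blitter clear otherwise:
 */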
static void
fd_clear(struct pipe_context *pctx, unsigned buffers,
         const struct pipe_scissor_state *scissor_state,
         const union pipe_color_union *color, double depth,
         unsigned stencil) in_dt
{
   struct fd_context *ctx = fd_context(pctx);

   /* TODO: push down the region versions into the tiles */
   if (!fd_render_condition_check(pctx))
      return;

   struct fd_batch *batch = fd_context_batch(ctx);

   batch_clear_tracking(batch, buffers);

   while (unlikely(batch->flushed)) {
      /* The current batch was flushed in batch_clear_tracking()
       * so start anew.  We know this won't happen a second time
       * since we are dealing with a fresh batch:
       */
      fd_batch_reference(&batch, NULL);
      batch = fd_context_batch(ctx);
      batch_clear_tracking(batch, buffers);
      assert(ctx->batch == batch);
   }

   /* Marking the batch as needing flush must come after the batch
    * dependency tracking (resource_read()/resource_written()), as that
    * can trigger a flush
    */
   fd_batch_needs_flush(batch);

   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   DBG("%p: %x %ux%u depth=%f, stencil=%u (%s/%s)", batch, buffers, pfb->width,
       pfb->height, depth, stencil,
       util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
       util_format_short_name(pipe_surface_format(pfb->zsbuf)));

   /* if the per-gen backend doesn't implement ctx->clear(), fall back
    * to generic blitter clear:
    */
   bool fallback = true;

   if (ctx->clear) {
      fd_batch_update_queries(batch);

      if (ctx->clear(ctx, buffers, color, depth, stencil)) {
         if (FD_DBG(DCLEAR))
            fd_context_all_dirty(ctx);

         fallback = false;
      }
   }

   assert(!batch->flushed);

   if (fallback) {
      fd_blitter_clear(pctx, buffers, color, depth, stencil);
   }

   fd_batch_check_size(batch);

   fd_batch_reference(&batch, NULL);
}

static void
fd_clear_render_target(struct pipe_context *pctx, struct pipe_surface *ps,
                       const union pipe_color_union *color, unsigned x,
                       unsigned y, unsigned w, unsigned h,
                       bool render_condition_enabled) in_dt
{
   if (render_condition_enabled && !fd_render_condition_check(pctx))
      return;

   fd_blitter_clear_render_target(pctx, ps, color, x, y, w, h,
                                  render_condition_enabled);
}

static void
fd_clear_depth_stencil(struct pipe_context *pctx, struct pipe_surface *ps,
                       unsigned buffers, double depth, unsigned stencil,
                       unsigned x, unsigned y, unsigned w, unsigned h,
                       bool render_condition_enabled) in_dt
{
   if (render_condition_enabled && !fd_render_condition_check(pctx))
      return;

   fd_blitter_clear_depth_stencil(pctx, ps, buffers,
                                  depth, stencil, x, y, w, h,
                                  render_condition_enabled);
}

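/* The compute dispatch entrypoint: runs on a nondraw batch, with resource
 * tracking similar to the draw path, then hands off to the per-gen backend:
 */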
static void
fd_launch_grid(struct pipe_context *pctx,
               const struct pipe_grid_info *info) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   const struct fd_shaderbuf_stateobj *so =
      &ctx->shaderbuf[PIPE_SHADER_COMPUTE];
   struct fd_batch *batch, *save_batch = NULL;

   if (!fd_render_condition_check(pctx))
      return;

   batch = fd_context_batch_nondraw(ctx);
   fd_batch_reference(&save_batch, ctx->batch);
   fd_batch_reference(&ctx->batch, batch);

   fd_screen_lock(ctx->screen);

   /* Mark SSBOs */
   u_foreach_bit (i, so->enabled_mask & so->writable_mask)
      resource_written(batch, so->sb[i].buffer);

   u_foreach_bit (i, so->enabled_mask & ~so->writable_mask)
      resource_read(batch, so->sb[i].buffer);

   u_foreach_bit (i, ctx->shaderimg[PIPE_SHADER_COMPUTE].enabled_mask) {
      struct pipe_image_view *img = &ctx->shaderimg[PIPE_SHADER_COMPUTE].si[i];
      if (img->access & PIPE_IMAGE_ACCESS_WRITE)
         resource_written(batch, img->resource);
      else
         resource_read(batch, img->resource);
   }

   /* UBOs are read */
   u_foreach_bit (i, ctx->constbuf[PIPE_SHADER_COMPUTE].enabled_mask)
      resource_read(batch, ctx->constbuf[PIPE_SHADER_COMPUTE].cb[i].buffer);

   /* Mark textures as being read */
   u_foreach_bit (i, ctx->tex[PIPE_SHADER_COMPUTE].valid_textures)
      resource_read(batch, ctx->tex[PIPE_SHADER_COMPUTE].textures[i]->texture);

   /* For global buffers, we don't really know if read or written, so assume
    * the worst:
    */
   u_foreach_bit (i, ctx->global_bindings.enabled_mask)
      resource_written(batch, ctx->global_bindings.buf[i]);

   if (info->indirect)
      resource_read(batch, info->indirect);

   list_for_each_entry (struct fd_acc_query, aq, &ctx->acc_active_queries, node) {
      resource_written(batch, aq->prsc);
   }

   /* If the saved batch has been flushed during the resource tracking,
    * don't re-install it:
    */
   if (save_batch && save_batch->flushed)
      fd_batch_reference_locked(&save_batch, NULL);

   fd_screen_unlock(ctx->screen);

   fd_batch_update_queries(batch);

   DBG("%p: work_dim=%u, block=%ux%ux%u, grid=%ux%ux%u",
       batch, info->work_dim,
       info->block[0], info->block[1], info->block[2],
       info->grid[0], info->grid[1], info->grid[2]);

   fd_batch_needs_flush(batch);
   ctx->launch_grid(ctx, info);

   fd_batch_reference(&ctx->batch, save_batch);
   fd_batch_reference(&save_batch, NULL);
   fd_batch_reference(&batch, NULL);
}

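/* Plug the draw/clear (and, where supported, compute) entrypoints into
 * the context:
 */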
void
fd_draw_init(struct pipe_context *pctx)
{
   if (FD_DBG(DDRAW) || FD_DBG(FLUSH)) {
      pctx->draw_vbo = fd_draw_vbo_dbg;
   } else {
      pctx->draw_vbo = fd_draw_vbo;
   }

   pctx->clear = fd_clear;
   pctx->clear_render_target = fd_clear_render_target;
   pctx->clear_depth_stencil = fd_clear_depth_stencil;

   if (has_compute(fd_screen(pctx->screen))) {
      pctx->launch_grid = fd_launch_grid;
   }
}