/*
 * Copyright © 2012 Rob Clark <[email protected]>
 * SPDX-License-Identifier: MIT
 *
 * Authors:
 *    Rob Clark <[email protected]>
 */

#ifndef FREEDRENO_DRAW_H_
#define FREEDRENO_DRAW_H_

#include "pipe/p_context.h"
#include "pipe/p_state.h"

#include "freedreno_context.h"
#include "freedreno_resource.h"
#include "freedreno_screen.h"
#include "freedreno_util.h"

struct fd_ringbuffer;

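/* Registers the context's generic draw/clear entry points (pctx->draw_vbo
 * and friends); the implementation lives in freedreno_draw.c.
 */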
void fd_draw_init(struct pipe_context *pctx);

#ifndef __cplusplus
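/* Emit the command-stream packets for a single draw into 'ring'.  For
 * indexed draws idx_buffer/idx_size/idx_offset describe the index buffer;
 * for auto-index draws idx_buffer is NULL.  Note that 'instances' is the
 * instance count minus one (see fd_draw_emit() below).
 */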
static inline void
fd_draw(struct fd_batch *batch, struct fd_ringbuffer *ring,
        enum pc_di_primtype primtype, enum pc_di_vis_cull_mode vismode,
        enum pc_di_src_sel src_sel, uint32_t count, uint8_t instances,
        enum pc_di_index_size idx_type, uint32_t idx_size, uint32_t idx_offset,
        struct pipe_resource *idx_buffer)
{
   /* For debugging after a lockup, write a unique counter value
    * to scratch7 for each draw, to make it easier to match up
    * register dumps to the cmdstream.  The combination of IB
    * (scratch6) and DRAW is enough to "triangulate" the
    * particular draw that caused the lockup.
    */
   emit_marker(ring, 7);

   if (is_a3xx_p0(batch->ctx->screen)) {
      /* dummy-draw workaround: */
      OUT_PKT3(ring, CP_DRAW_INDX, 3);
      OUT_RING(ring, 0x00000000);
      OUT_RING(ring, DRAW(1, DI_SRC_SEL_AUTO_INDEX, INDEX_SIZE_IGN,
                          USE_VISIBILITY, 0));
      OUT_RING(ring, 0); /* NumIndices */

      /* ugg, hard-code register offset to avoid pulling in the
       * a3xx register headers into something #included from a2xx
       */
      OUT_PKT0(ring, 0x2206, 1); /* A3XX_HLSQ_CONST_VSPRESV_RANGE_REG */
      OUT_RING(ring, 0);
   }

   if (is_a20x(batch->ctx->screen)) {
      /* a20x has a different draw command for drawing with binning data.
       * Note: if we do patching we will have to insert a NOP.
       *
       * Binning data is 1 byte/vertex (8x8x4 bin position of vertex);
       * the base pointer is set by the CP_SET_DRAW_INIT_FLAGS command.
       *
       * TODO: investigate the faceness_cull_select parameter to see how
       * it is used with hw binning to use "faceness" bits
       */
      uint32_t size = 2;
      if (vismode)
         size += 2;
      if (idx_buffer)
         size += 2;

      BEGIN_RING(ring, size + 1);
      if (vismode)
         util_dynarray_append(&batch->draw_patches, uint32_t *, ring->cur);

      OUT_PKT3(ring, vismode ? CP_DRAW_INDX_BIN : CP_DRAW_INDX, size);
      OUT_RING(ring, 0x00000000);
      OUT_RING(ring, DRAW_A20X(primtype, DI_FACE_CULL_NONE, src_sel, idx_type,
                               vismode, vismode, count));
      if (vismode == USE_VISIBILITY) {
         OUT_RING(ring, batch->num_vertices);
         OUT_RING(ring, count);
      }
   } else {
      OUT_PKT3(ring, CP_DRAW_INDX, idx_buffer ? 5 : 3);
      OUT_RING(ring, 0x00000000); /* viz query info. */
      if (vismode == USE_VISIBILITY) {
         /* leave vis mode blank for now, it will be patched up when
          * we know if we are binning or not
          */
         OUT_RINGP(ring, DRAW(primtype, src_sel, idx_type, 0, instances),
                   &batch->draw_patches);
      } else {
         OUT_RING(ring, DRAW(primtype, src_sel, idx_type, vismode, instances));
      }
      OUT_RING(ring, count); /* NumIndices */
   }

   if (idx_buffer) {
      OUT_RELOC(ring, fd_resource(idx_buffer)->bo, idx_offset, 0, 0);
      OUT_RING(ring, idx_size);
   }

   emit_marker(ring, 7);

   fd_reset_wfi(batch);
}

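/* Map a gallium index size in bytes (1/2/4) to the hw index-type enum;
 * any other size asserts.
 */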
static inline enum pc_di_index_size
size2indextype(unsigned index_size)
{
   switch (index_size) {
   case 1:
      return INDEX_SIZE_8_BIT;
   case 2:
      return INDEX_SIZE_16_BIT;
   case 4:
      return INDEX_SIZE_32_BIT;
   }
   DBG("unsupported index size: %d", index_size);
   assert(0);
   return INDEX_SIZE_IGN;
}

/* this is the same for a2xx/a3xx, so split into a helper: */
static inline void
fd_draw_emit(struct fd_batch *batch, struct fd_ringbuffer *ring,
             enum pc_di_primtype primtype, enum pc_di_vis_cull_mode vismode,
             const struct pipe_draw_info *info,
             const struct pipe_draw_start_count_bias *draw, unsigned index_offset)
{
   struct pipe_resource *idx_buffer = NULL;
   enum pc_di_index_size idx_type = INDEX_SIZE_IGN;
   enum pc_di_src_sel src_sel;
   uint32_t idx_size, idx_offset;

   if (info->index_size) {
      assert(!info->has_user_indices);

      idx_buffer = info->index.resource;
      idx_type = size2indextype(info->index_size);
      idx_size = info->index_size * draw->count;
      idx_offset = index_offset + draw->start * info->index_size;
      src_sel = DI_SRC_SEL_DMA;
   } else {
      idx_buffer = NULL;
      idx_type = INDEX_SIZE_IGN;
      idx_size = 0;
      idx_offset = 0;
      src_sel = DI_SRC_SEL_AUTO_INDEX;
   }

   fd_draw(batch, ring, primtype, vismode, src_sel, draw->count,
           info->instance_count - 1, idx_type, idx_size, idx_offset,
           idx_buffer);
}
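
/* Illustrative sketch (not actual driver code) of how a generation-specific
 * backend might drive the helper above: translate the gallium primitive,
 * pick a visibility mode, and call fd_draw_emit() once per draw.  The names
 * fdN_draw() and fdN_primtype() below are hypothetical; see the real
 * a2xx/a3xx draw code for the actual hook signatures.
 *
 *    static void
 *    fdN_draw(struct fd_context *ctx, const struct pipe_draw_info *info,
 *             const struct pipe_draw_start_count_bias *draw,
 *             unsigned index_offset)
 *    {
 *       struct fd_batch *batch = ctx->batch;
 *       // hypothetical per-gen translation of info->mode:
 *       enum pc_di_primtype primtype = fdN_primtype(info->mode);
 *
 *       fd_draw_emit(batch, batch->draw, primtype, IGNORE_VISIBILITY,
 *                    info, draw, index_offset);
 *    }
 */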
#endif

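/* Shared helper for the per-generation draw code: when blend state is dirty,
 * record in batch->gmem_reason whether logic ops or blending are enabled on
 * any bound color buffer.
 */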
static inline void
fd_blend_tracking(struct fd_context *ctx)
   assert_dt
{
   if (ctx->dirty & FD_DIRTY_BLEND) {
      struct fd_batch *batch = ctx->batch;
      struct pipe_framebuffer_state *pfb = &batch->framebuffer;

      if (ctx->blend->logicop_enable)
         batch->gmem_reason |= FD_GMEM_LOGICOP_ENABLED;
      for (unsigned i = 0; i < pfb->nr_cbufs; i++) {
         if (ctx->blend->rt[i].blend_enable)
            batch->gmem_reason |= FD_GMEM_BLEND_ENABLED;
      }
   }
}

#endif /* FREEDRENO_DRAW_H_ */